author jc_gargma <jc_gargma@iserlohn-fortress.net> 2018-05-03 10:59:06 -0700
committer jc_gargma <jc_gargma@iserlohn-fortress.net> 2018-05-03 10:59:06 -0700
commit 2ce815cc7c05ced42e671a1b31eac2055672ec32 (patch)
tree c73a601d932b1d5b66b2909c9bd8602c40fda0dc
download linux-libre-hardened-ck-2ce815cc7c05ced42e671a1b31eac2055672ec32.tar.xz
Initial commit
-rw-r--r--  60-linux.hook  12
-rw-r--r--  90-linux.hook  11
-rw-r--r--  PKGBUILD  301
-rw-r--r--  config.x86_64  9632
-rw-r--r--  drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch  39
-rw-r--r--  fix-vboxguest-on-guests-with-more-than-4G-RAM.patch  549
-rw-r--r--  linux.install  11
-rw-r--r--  linux.preset  14
-rw-r--r--  patch-4.16-ck1.patch  12161
-rw-r--r--  regdom-hack-4.16.6.patch  63
10 files changed, 22793 insertions, 0 deletions
diff --git a/60-linux.hook b/60-linux.hook
new file mode 100644
index 0000000..b33873c
--- /dev/null
+++ b/60-linux.hook
@@ -0,0 +1,12 @@
+[Trigger]
+Type = File
+Operation = Install
+Operation = Upgrade
+Operation = Remove
+Target = usr/lib/modules/%KERNVER%/*
+Target = usr/lib/modules/%EXTRAMODULES%/*
+
+[Action]
+Description = Updating %PKGBASE% module dependencies...
+When = PostTransaction
+Exec = /usr/bin/depmod %KERNVER%
diff --git a/90-linux.hook b/90-linux.hook
new file mode 100644
index 0000000..be0d886
--- /dev/null
+++ b/90-linux.hook
@@ -0,0 +1,11 @@
+[Trigger]
+Type = File
+Operation = Install
+Operation = Upgrade
+Target = boot/vmlinuz-%PKGBASE%
+Target = usr/lib/initcpio/*
+
+[Action]
+Description = Updating %PKGBASE% initcpios...
+When = PostTransaction
+Exec = /usr/bin/mkinitcpio -p %PKGBASE%
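
Both hook files above are templates rather than finished hooks: %PKGBASE%, %KERNVER% and %EXTRAMODULES% are placeholders that the PKGBUILD below fills in at packaging time via its _subst sed block. A minimal sketch of that substitution step, with illustrative values for the three variables (the real ones are computed in _package()):

  # Illustrative values only; _package() derives the real ones.
  pkgbase=linux-libre-hardened-ck
  _kernver=4.16.6-1-hardened-ck
  _extramodules=extramodules-4.16-hardened-ck
  sed -e "s|%PKGBASE%|${pkgbase}|g" \
      -e "s|%KERNVER%|${_kernver}|g" \
      -e "s|%EXTRAMODULES%|${_extramodules}|g" \
      60-linux.hook > "60-${pkgbase}.hook"

The resulting hook makes pacman run depmod against exactly this kernel's module tree whenever matching files are installed, upgraded or removed.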
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 0000000..bfef2bb
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,301 @@
+# Maintainer: Levente Polyak <anthraxx[at]archlinux[dot]org>
+# Contributor: Daniel Micay <danielmicay@gmail.com>
+# Contributor: Tobias Powalowski <tpowa@archlinux.org>
+# Contributor: Thomas Baechler <thomas@archlinux.org>
+
+pkgbase=linux-libre-hardened-ck
+_majver=4.16
+_minver=6
+_fullver=${_majver}.${_minver}
+_basever=${_majver}-gnu
+_pkgver=${_fullver}-gnu
+_hardver=a
+_NUMAdisable=y
+_ckpatchversion=1
+_ckpatchname="patch-4.16-ck${_ckpatchversion}"
+_gcc_more_v='20180310'
+_srcname=linux-${_majver}
+pkgver=${_fullver}.${_hardver}
+pkgrel=1
+url='https://github.com/anthraxx/linux-hardened'
+#url='http://ck.kolivas.org/patches/'
+arch=('x86_64')
+license=('GPL2')
+makedepends=('xmlto' 'kmod' 'inetutils' 'bc' 'libelf')
+options=('!strip')
+source=(https://linux-libre.fsfla.org/pub/linux-libre/releases/${_basever}/linux-libre-${_basever}.tar.xz{,.sign}
+ https://linux-libre.fsfla.org/pub/linux-libre/releases/${_pkgver}/patch-${_basever}-${_pkgver}.xz{,.sign}
+ https://github.com/anthraxx/linux-hardened/releases/download/${_fullver}.${_hardver}/linux-hardened-${_fullver}.${_hardver}.patch{,.sig}
+ patch-4.16-ck1.patch
+ #http://ck.kolivas.org/patches/4.0/4.16/4.16-ck${_ckpatchversion}/${_ckpatchname}.xz
+ enable_additional_cpu_optimizations-$_gcc_more_v.tar.gz::https://github.com/graysky2/kernel_gcc_patch/archive/$_gcc_more_v.tar.gz
+ #https://github.com/graysky2/kernel_gcc_patch/raw/master/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v4.13+.patch
+ regdom-hack-${_fullver}.patch
+        config.x86_64 # the main kernel config file
+ 60-linux.hook # pacman hook for depmod
+ 90-linux.hook # pacman hook for initramfs regeneration
+ linux.preset # standard config files for mkinitcpio ramdisk
+
+ # https://bugs.archlinux.org/task/56711
+ drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch
+ fix-vboxguest-on-guests-with-more-than-4G-RAM.patch
+)
+sha256sums=('5bd4ee95a0a9d7d2f962504f4b684b441b07f5a136e8bb8ad959f2d29ab9b913'
+ 'SKIP'
+ '07f671e3bf9ea10918f146c1fd96eaa6da6410e91256665f3d36699de9952f65'
+ 'SKIP'
+ '587477338e3c34e31c8d895bae9bed6847d6b0b5ad318460733ee9213730315f'
+ 'SKIP'
+ '7fa37d0804ede48520c6f2ff9c0cff7e1747c202ee566198cdf2eeeaf6ae312f'
+ 'b2c1292e06544465b636543e6ac8a01959470d32ce3664460721671f1347c815'
+ 'e7ebf050c22bcec0028c0b3c79fd6d3913b0370ecc6a23dfe78ce475630cf503'
+ 'c25b9b025ae48a2855eae0eda28bc1463603d209e6f0f8a2ab379a077115361a'
+ 'ae2e95db94ef7176207c690224169594d49445e04249d2499e9d2fbc117a0b21'
+ '75f99f5239e03238f88d1a834c50043ec32b1dc568f2cc291b07d04718483919'
+ 'ad6344badc91ad0630caacde83f7f9b97276f80d26a20619a87952be65492c65'
+ 'c08d12c699398ef88b764be1837b9ee11f2efd3188bd1bf4e8f85dfbeee58148'
+ 'b1c1cf770b2baab046d52687ec3dd83c543e3f45b4abeae2686c814673e0a1c5')
+validpgpkeys=(
+ '474402C8C582DAFBE389C427BCB7CF877E7D47A7' # Alexandre Oliva
+ '65EEFE022108E2B708CBFCF7F9E712E59AF5F22A' # Daniel Micay
+ 'E240B57E2C4630BA768E2F26FC1B547C8D8172C8' # Levente Polyak
+ )
+_kernelname=${pkgbase#linux-libre}
+: ${_kernelname:=-ARCH}
+
+prepare() {
+ cd ${_srcname}
+
+ # add upstream patch
+ msg2 "Applying upstream patch"
+ patch -p1 -i "${srcdir}/patch-${_basever}-${_pkgver}"
+
+ # Hotfixes
+ msg2 "Applying hotfixes"
+ patch -p1 -i "${srcdir}/drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch"
+ patch -p1 -i "${srcdir}/fix-vboxguest-on-guests-with-more-than-4G-RAM.patch"
+
+  # fix naming scheme in EXTRAVERSION of ck patch set
+ #sed -i -re "s/^(.EXTRAVERSION).*$/\1 = /" "../${_ckpatchname}"
+
+ # linux hardened patch
+ msg2 "Applying hardened patch"
+ patch -p1 -i "${srcdir}/linux-hardened-${pkgver}.patch"
+
+ # Patch source with ck patchset
+ msg2 "Applying ck patch"
+ #patch -p1 -i "${srcdir}/${_ckpatchname}"
+ patch -p1 -i "${srcdir}/${_ckpatchname}.patch"
+
+  # Patch source to unlock additional gcc CPU optimizations
+ # https://github.com/graysky2/kernel_gcc_patch
+ msg2 "Applying graysky2 patch"
+ patch -p1 -i "${srcdir}/kernel_gcc_patch-$_gcc_more_v/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v4.13+.patch"
+
+ # graysky2 gcc patch
+ #patch -p1 -i "${srcdir}/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v4.13+.patch"
+
+  # Apply patch to make ath9k ignore the EEPROM regulatory domain
+ patch -p1 -i "${srcdir}/regdom-hack-${_fullver}.patch"
+
+
+ # add latest fixes from stable queue, if needed
+ # http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
+
+ cat ../config.x86_64 - >.config <<END
+CONFIG_LOCALVERSION="${_kernelname}"
+CONFIG_LOCALVERSION_AUTO=n
+END
+
+ # set extraversion to pkgrel and empty localversion
+ sed -e "/^EXTRAVERSION =/s/=.*/= -${pkgrel}/" \
+ -e "/^EXTRAVERSION =/aLOCALVERSION =" \
+ -i Makefile
+
+ ### Optionally disable NUMA for 64-bit kernels only
+  # (32-bit x86 kernels do not support NUMA)
+ if [ -n "$_NUMAdisable" ]; then
+ msg "Disabling NUMA from kernel config..."
+    sed -i -e 's/CONFIG_NUMA=y/# CONFIG_NUMA is not set/' \
+        -e '/CONFIG_AMD_NUMA=y/d' \
+        -e '/CONFIG_X86_64_ACPI_NUMA=y/d' \
+        -e '/CONFIG_NODES_SPAN_OTHER_NODES=y/d' \
+        -e '/# CONFIG_NUMA_EMU is not set/d' \
+        -e '/CONFIG_NODES_SHIFT=2/d' \
+        -e '/CONFIG_NEED_MULTIPLE_NODES=y/d' \
+        -e '/# CONFIG_MOVABLE_NODE is not set/d' \
+        -e '/CONFIG_USE_PERCPU_NUMA_NODE_ID=y/d' \
+        -e '/CONFIG_ACPI_NUMA=y/d' ./.config
+ fi
+
+ # don't run depmod on 'make install'. We'll do this ourselves in packaging
+ sed -i '2iexit 0' scripts/depmod.sh
+
+ # get kernel version
+ make prepare
+
+ # load configuration
+ # Configure the kernel. Replace the line below with one of your choice.
+ #make menuconfig # CLI menu for configuration
+ #make nconfig # new CLI menu for configuration
+ #make xconfig # X-based configuration
+ #make oldconfig # using old config from previous kernel version
+ # ... or manually edit .config
+
+ # rewrite configuration
+ yes "" | make config >/dev/null
+}
+
+build() {
+ cd ${_srcname}
+
+ make bzImage modules
+}
+
+_package() {
+ pkgdesc="The ${pkgbase/linux/Linux} kernel and modules"
+ [ "${pkgbase}" = "linux-hardened" ] && groups=('base')
+ depends=('coreutils' 'linux-libre-firmware' 'kmod' 'mkinitcpio>=0.7')
+ optdepends=('crda: to set the correct wireless channels of your country')
+ backup=("etc/mkinitcpio.d/${pkgbase}.preset")
+ install=linux.install
+
+ cd ${_srcname}
+
+ # get kernel version
+ _kernver="$(make kernelrelease)"
+ _basekernel=${_kernver%%-*}
+ _basekernel=${_basekernel%.*}
+
+ mkdir -p "${pkgdir}"/{boot,usr/lib/modules}
+ make INSTALL_MOD_PATH="${pkgdir}/usr" modules_install
+ cp arch/x86/boot/bzImage "${pkgdir}/boot/vmlinuz-${pkgbase}"
+
+ # make room for external modules
+ local _extramodules="extramodules-${_basekernel}${_kernelname}"
+ ln -s "../${_extramodules}" "${pkgdir}/usr/lib/modules/${_kernver}/extramodules"
+
+ # add real version for building modules and running depmod from hook
+ echo "${_kernver}" |
+ install -Dm644 /dev/stdin "${pkgdir}/usr/lib/modules/${_extramodules}/version"
+
+ # remove build and source links
+ rm "${pkgdir}"/usr/lib/modules/${_kernver}/{source,build}
+
+ # now we call depmod...
+ depmod -b "${pkgdir}/usr" -F System.map "${_kernver}"
+
+ # add vmlinux
+ install -Dt "${pkgdir}/usr/lib/modules/${_kernver}/build" -m644 vmlinux
+
+  # sed expression for the following substitutions
+ local _subst="
+ s|%PKGBASE%|${pkgbase}|g
+ s|%KERNVER%|${_kernver}|g
+ s|%EXTRAMODULES%|${_extramodules}|g
+ "
+
+ # hack to allow specifying an initially nonexisting install file
+ sed "${_subst}" "${startdir}/${install}" > "${startdir}/${install}.pkg"
+ true && install=${install}.pkg
+
+ # install mkinitcpio preset file
+ sed "${_subst}" ../linux.preset |
+ install -Dm644 /dev/stdin "${pkgdir}/etc/mkinitcpio.d/${pkgbase}.preset"
+
+ # install pacman hooks
+ sed "${_subst}" ../60-linux.hook |
+ install -Dm644 /dev/stdin "${pkgdir}/usr/share/libalpm/hooks/60-${pkgbase}.hook"
+ sed "${_subst}" ../90-linux.hook |
+ install -Dm644 /dev/stdin "${pkgdir}/usr/share/libalpm/hooks/90-${pkgbase}.hook"
+}
+
+_package-headers() {
+ pkgdesc="Header files and scripts for building modules for ${pkgbase/linux/Linux} kernel"
+
+ cd ${_srcname}
+ local _builddir="${pkgdir}/usr/lib/modules/${_kernver}/build"
+
+ install -Dt "${_builddir}" -m644 Makefile .config Module.symvers
+ install -Dt "${_builddir}/kernel" -m644 kernel/Makefile
+
+ mkdir "${_builddir}/.tmp_versions"
+
+ cp -t "${_builddir}" -a include scripts
+
+ install -Dt "${_builddir}/arch/x86" -m644 arch/x86/Makefile
+ install -Dt "${_builddir}/arch/x86/kernel" -m644 arch/x86/kernel/asm-offsets.s
+
+ cp -t "${_builddir}/arch/x86" -a arch/x86/include
+
+ install -Dt "${_builddir}/drivers/md" -m644 drivers/md/*.h
+ install -Dt "${_builddir}/net/mac80211" -m644 net/mac80211/*.h
+
+ # http://bugs.archlinux.org/task/13146
+ install -Dt "${_builddir}/drivers/media/i2c" -m644 drivers/media/i2c/msp3400-driver.h
+
+ # http://bugs.archlinux.org/task/20402
+ install -Dt "${_builddir}/drivers/media/usb/dvb-usb" -m644 drivers/media/usb/dvb-usb/*.h
+ install -Dt "${_builddir}/drivers/media/dvb-frontends" -m644 drivers/media/dvb-frontends/*.h
+ install -Dt "${_builddir}/drivers/media/tuners" -m644 drivers/media/tuners/*.h
+
+ # add xfs and shmem for aufs building
+ mkdir -p "${_builddir}"/{fs/xfs,mm}
+
+ # copy in Kconfig files
+ find . -name Kconfig\* -exec install -Dm644 {} "${_builddir}/{}" \;
+
+  # add objtool for external module building (needed with the STACK_VALIDATION option enabled)
+ install -Dt "${_builddir}/tools/objtool" tools/objtool/objtool
+
+ # remove unneeded architectures
+ local _arch
+ for _arch in "${_builddir}"/arch/*/; do
+ [[ ${_arch} == */x86/ ]] && continue
+ rm -r "${_arch}"
+ done
+
+ # remove files already in linux-libre-docs package
+ rm -r "${_builddir}/Documentation"
+
+ # remove now broken symlinks
+ find -L "${_builddir}" -type l -printf 'Removing %P\n' -delete
+
+ # Fix permissions
+ chmod -R u=rwX,go=rX "${_builddir}"
+
+ # strip scripts directory
+ local _binary _strip
+ while read -rd '' _binary; do
+ case "$(file -bi "${_binary}")" in
+ *application/x-sharedlib*) _strip="${STRIP_SHARED}" ;; # Libraries (.so)
+ *application/x-archive*) _strip="${STRIP_STATIC}" ;; # Libraries (.a)
+ *application/x-executable*) _strip="${STRIP_BINARIES}" ;; # Binaries
+ *) continue ;;
+ esac
+ /usr/bin/strip ${_strip} "${_binary}"
+ done < <(find "${_builddir}/scripts" -type f -perm -u+w -print0 2>/dev/null)
+}
+
+_package-docs() {
+ pkgdesc="Kernel hackers manual - HTML documentation that comes with the ${pkgbase/linux/Linux} kernel"
+
+ cd ${_srcname}
+ local _builddir="${pkgdir}/usr/lib/modules/${_kernver}/build"
+
+ mkdir -p "${_builddir}"
+ cp -t "${_builddir}" -a Documentation
+
+ # Fix permissions
+ chmod -R u=rwX,go=rX "${_builddir}"
+}
+
+pkgname=("${pkgbase}" "${pkgbase}-headers" "${pkgbase}-docs")
+for _p in "${pkgname[@]}"; do
+ eval "package_${_p}() {
+ $(declare -f "_package${_p#${pkgbase}}")
+ _package${_p#${pkgbase}}
+ }"
+done
+
+# vim:set ts=8 sts=2 sw=2 et:
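
The eval loop at the bottom of the PKGBUILD is how the split packages are wired up: for each entry in pkgname it re-emits the matching _package*() helper via declare -f and wraps it in a generated package_*() function. A schematic of what the eval defines for the headers package, assuming pkgbase=linux-libre-hardened-ck so that ${_p#${pkgbase}} expands to "-headers":

  # Schematic expansion for _p=linux-libre-hardened-ck-headers;
  # the "..." stands for the helper's full body as printed by "declare -f".
  package_linux-libre-hardened-ck-headers() {
      _package-headers () { ... }
      _package-headers
  }

Re-declaring the helper inside the wrapper keeps each generated package function self-contained, which matters because makepkg extracts and runs the package_*() functions one at a time.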
diff --git a/config.x86_64 b/config.x86_64
new file mode 100644
index 0000000..150957f
--- /dev/null
+++ b/config.x86_64
@@ -0,0 +1,9632 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86 4.16.6 Kernel Configuration
+#
+CONFIG_64BIT=y
+CONFIG_X86_64=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf64-x86-64"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_ARCH_MMAP_RND_BITS_MIN=28
+CONFIG_ARCH_MMAP_RND_BITS_MAX=32
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ZONE_DMA32=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_HAVE_INTEL_TXT=y
+CONFIG_X86_64_SMP=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PGTABLE_LEVELS=4
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+CONFIG_THREAD_INFO_IN_TASK=y
+
+#
+# General setup
+#
+CONFIG_SCHED_MUQSS=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+# CONFIG_COMPILE_TEST is not set
+CONFIG_LOCALVERSION="-hardened"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_HAVE_KERNEL_LZ4=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
+CONFIG_DEFAULT_HOSTNAME="archlinux"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_SIM=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
+CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
+CONFIG_IRQ_FORCED_THREADING=y
+# CONFIG_FORCE_IRQ_THREADING is not set
+CONFIG_SPARSE_IRQ=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+# CONFIG_NO_HZ_IDLE is not set
+CONFIG_NO_HZ_FULL=y
+# CONFIG_NO_HZ_FULL_ALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CPU_ISOLATION=y
+
+#
+# RCU Subsystem
+#
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_EXPERT=y
+CONFIG_SRCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_TASKS_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_NEED_SEGCBLIST=y
+CONFIG_CONTEXT_TRACKING=y
+# CONFIG_CONTEXT_TRACKING_FORCE is not set
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_DELAY=500
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_BUILD_BIN2C=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
+CONFIG_ARCH_SUPPORTS_INT128=y
+CONFIG_CGROUPS=y
+CONFIG_PAGE_COUNTER=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_SWAP_ENABLED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+CONFIG_CGROUP_WRITEBACK=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_SOCK_CGROUP_DATA=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_RD_LZ4=y
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+# CONFIG_LOCAL_INIT is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_BPF=y
+CONFIG_EXPERT=y
+# CONFIG_UID16 is not set
+CONFIG_MULTIUSER=y
+CONFIG_SGETMASK_SYSCALL=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_FHANDLE=y
+CONFIG_POSIX_TIMERS=y
+CONFIG_PRINTK=y
+CONFIG_PRINTK_NMI=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_FUTEX_PI=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_MEMBARRIER=y
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
+CONFIG_KALLSYMS_BASE_RELATIVE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_USERFAULTFD is not set
+CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+# CONFIG_PC104 is not set
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SLAB_HARDENED=y
+CONFIG_SLAB_CANARY=y
+CONFIG_SLAB_SANITIZE=y
+CONFIG_SLAB_SANITIZE_VERIFY=y
+CONFIG_SLUB_CPU_PARTIAL=y
+CONFIG_SYSTEM_DATA_VERIFICATION=y
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_CRASH_CORE=y
+CONFIG_KEXEC_CORE=y
+CONFIG_OPROFILE=m
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_STATIC_KEYS_SELFTEST is not set
+CONFIG_OPTPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_UPROBES=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGINS is not set
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+# CONFIG_CC_STACKPROTECTOR_NONE is not set
+# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
+# CONFIG_CC_STACKPROTECTOR_STRONG is not set
+CONFIG_CC_STACKPROTECTOR_AUTO=y
+CONFIG_THIN_ARCHIVES=y
+CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_SOFT_DIRTY=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_HAVE_EXIT_THREAD=y
+CONFIG_ARCH_MMAP_RND_BITS=32
+CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
+CONFIG_HAVE_COPY_THREAD_TLS=y
+CONFIG_HAVE_STACK_VALIDATION=y
+# CONFIG_HAVE_ARCH_HASH is not set
+# CONFIG_ISA_BUS_API is not set
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_COMPAT_OLD_SIGACTION=y
+# CONFIG_CPU_NO_EFFICIENT_FFS is not set
+CONFIG_HAVE_ARCH_VMAP_STACK=y
+CONFIG_VMAP_STACK=y
+# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
+# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
+CONFIG_ARCH_HAS_PHYS_TO_DMA=y
+CONFIG_ARCH_HAS_REFCOUNT=y
+CONFIG_REFCOUNT_FULL=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_MODULE_SIG is not set
+CONFIG_MODULE_COMPRESS=y
+# CONFIG_MODULE_COMPRESS_GZIP is not set
+CONFIG_MODULE_COMPRESS_XZ=y
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+CONFIG_BLK_SCSI_REQUEST=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_DEV_THROTTLING_LOW=y
+# CONFIG_BLK_CMDLINE_PARSER is not set
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_WBT_MQ=y
+CONFIG_BLK_DEBUG_FS=y
+CONFIG_BLK_SED_OPAL=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_AIX_PARTITION=y
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+CONFIG_BLOCK_COMPAT=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_MQ_RDMA=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PADATA=y
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_FEATURE_NAMES=y
+CONFIG_X86_FAST_FEATURE_TESTS=y
+CONFIG_X86_X2APIC=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_GOLDFISH is not set
+CONFIG_RETPOLINE=y
+CONFIG_INTEL_RDT=y
+# CONFIG_X86_EXTENDED_PLATFORM is not set
+CONFIG_X86_INTEL_LPSS=y
+CONFIG_X86_AMD_PLATFORM_DEVICE=y
+CONFIG_IOSF_MBI=y
+# CONFIG_IOSF_MBI_DEBUG is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+CONFIG_PARAVIRT_SPINLOCKS=y
+# CONFIG_QUEUED_LOCK_STAT is not set
+CONFIG_XEN=y
+CONFIG_XEN_PV=y
+CONFIG_XEN_PV_SMP=y
+CONFIG_XEN_DOM0=y
+CONFIG_XEN_PVHVM=y
+CONFIG_XEN_PVHVM_SMP=y
+CONFIG_XEN_512GB=y
+CONFIG_XEN_SAVE_RESTORE=y
+# CONFIG_XEN_DEBUG_FS is not set
+CONFIG_XEN_PVH=y
+CONFIG_KVM_GUEST=y
+# CONFIG_KVM_DEBUG_FS is not set
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_PARAVIRT_CLOCK=y
+CONFIG_JAILHOUSE_GUEST=y
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MK8 is not set
+# CONFIG_MK8SSE3 is not set
+# CONFIG_MK10 is not set
+# CONFIG_MBARCELONA is not set
+# CONFIG_MBOBCAT is not set
+# CONFIG_MJAGUAR is not set
+# CONFIG_MBULLDOZER is not set
+# CONFIG_MPILEDRIVER is not set
+# CONFIG_MSTEAMROLLER is not set
+# CONFIG_MEXCAVATOR is not set
+# CONFIG_MZEN is not set
+# CONFIG_MPSC is not set
+# CONFIG_MATOM is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MNEHALEM is not set
+# CONFIG_MWESTMERE is not set
+# CONFIG_MSILVERMONT is not set
+# CONFIG_MSANDYBRIDGE is not set
+# CONFIG_MIVYBRIDGE is not set
+# CONFIG_MHASWELL is not set
+# CONFIG_MBROADWELL is not set
+# CONFIG_MSKYLAKE is not set
+# CONFIG_MSKYLAKEX is not set
+CONFIG_GENERIC_CPU=y
+# CONFIG_MNATIVE is not set
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=64
+CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_PROCESSOR_SELECT=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+CONFIG_DMI=y
+CONFIG_GART_IOMMU=y
+CONFIG_CALGARY_IOMMU=y
+CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
+CONFIG_SWIOTLB=y
+CONFIG_IOMMU_HELPER=y
+# CONFIG_MAXSMP is not set
+CONFIG_NR_CPUS_RANGE_BEGIN=2
+CONFIG_NR_CPUS_RANGE_END=512
+CONFIG_NR_CPUS_DEFAULT=64
+CONFIG_NR_CPUS=320
+CONFIG_SCHED_SMT=y
+CONFIG_SMT_NICE=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_MC_PRIO=y
+# CONFIG_RQ_NONE is not set
+# CONFIG_RQ_SMT is not set
+CONFIG_RQ_MC=y
+# CONFIG_RQ_SMP is not set
+CONFIG_SHARERQ=2
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+CONFIG_X86_MCE=y
+# CONFIG_X86_MCELOG_LEGACY is not set
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+CONFIG_X86_MCE_THRESHOLD=y
+CONFIG_X86_MCE_INJECT=m
+CONFIG_X86_THERMAL_VECTOR=y
+
+#
+# Performance monitoring
+#
+CONFIG_PERF_EVENTS_INTEL_UNCORE=m
+CONFIG_PERF_EVENTS_INTEL_RAPL=m
+CONFIG_PERF_EVENTS_INTEL_CSTATE=m
+CONFIG_PERF_EVENTS_AMD_POWER=m
+# CONFIG_VM86 is not set
+CONFIG_X86_VSYSCALL_EMULATION=y
+CONFIG_I8K=m
+CONFIG_MICROCODE=y
+# CONFIG_MICROCODE_INTEL is not set
+# CONFIG_MICROCODE_AMD is not set
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=m
+CONFIG_X86_CPUID=m
+# CONFIG_X86_5LEVEL is not set
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_X86_DIRECT_GBPAGES=y
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
+CONFIG_AMD_MEM_ENCRYPT=y
+CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y
+CONFIG_ARCH_USE_MEMREMAP_PROT=y
+CONFIG_NUMA=y
+CONFIG_AMD_NUMA=y
+CONFIG_X86_64_ACPI_NUMA=y
+CONFIG_NODES_SPAN_OTHER_NODES=y
+# CONFIG_NUMA_EMU is not set
+CONFIG_NODES_SHIFT=2
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_HAVE_GENERIC_GUP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_HAVE_BOOTMEM_INFO_NODE=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_MEMORY_BALLOON=y
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
+CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
+CONFIG_ARCH_ENABLE_THP_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_MMU_NOTIFIER=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_HWPOISON_INJECT=m
+CONFIG_TRANSPARENT_HUGEPAGE=y
+# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_THP_SWAP=y
+CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+# CONFIG_CMA is not set
+CONFIG_ZSWAP=y
+CONFIG_ZPOOL=y
+CONFIG_ZBUD=y
+CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=y
+# CONFIG_PGTABLE_MAPPING is not set
+# CONFIG_ZSMALLOC_STAT is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+# CONFIG_IDLE_PAGE_TRACKING is not set
+CONFIG_ARCH_HAS_ZONE_DEVICE=y
+CONFIG_ZONE_DEVICE=y
+CONFIG_ARCH_HAS_HMM=y
+CONFIG_MIGRATE_VMA_HELPER=y
+CONFIG_HMM=y
+CONFIG_HMM_MIRROR=y
+CONFIG_DEVICE_PRIVATE=y
+CONFIG_DEVICE_PUBLIC=y
+CONFIG_FRAME_VECTOR=y
+CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
+CONFIG_ARCH_HAS_PKEYS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_BENCHMARK is not set
+CONFIG_X86_PMEM_LEGACY_DEVICE=y
+CONFIG_X86_PMEM_LEGACY=m
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_RESERVE_LOW=64
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=0
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+CONFIG_ARCH_RANDOM=y
+CONFIG_X86_SMAP=y
+CONFIG_X86_INTEL_UMIP=y
+CONFIG_X86_INTEL_MPX=y
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EFI_MIXED=y
+CONFIG_SECCOMP=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250_NODEF is not set
+# CONFIG_HZ_300_NODEF is not set
+# CONFIG_HZ_1000_NODEF is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+# CONFIG_KEXEC is not set
+CONFIG_KEXEC_FILE=y
+# CONFIG_KEXEC_VERIFY_SIG is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x1000000
+CONFIG_RELOCATABLE=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_RANDOMIZE_MEMORY=y
+CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x1
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
+CONFIG_LEGACY_VSYSCALL_NONE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="audit=0"
+# CONFIG_CMDLINE_OVERRIDE is not set
+# CONFIG_MODIFY_LDT_SYSCALL is not set
+CONFIG_HAVE_LIVEPATCH=y
+# CONFIG_LIVEPATCH is not set
+CONFIG_ARCH_HAS_ADD_PAGES=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_SUSPEND_SKIP_SYNC is not set
+CONFIG_HIBERNATE_CALLBACKS=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_DPM_WATCHDOG is not set
+CONFIG_PM_TRACE=y
+CONFIG_PM_TRACE_RTC=y
+CONFIG_PM_CLK=y
+CONFIG_PM_GENERIC_DOMAINS=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
+CONFIG_PM_GENERIC_DOMAINS_OF=y
+CONFIG_ACPI=y
+CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
+CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
+# CONFIG_ACPI_DEBUGGER is not set
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_LPIT=y
+CONFIG_ACPI_SLEEP=y
+# CONFIG_ACPI_PROCFS_POWER is not set
+CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
+# CONFIG_ACPI_EC_DEBUGFS is not set
+CONFIG_ACPI_AC=m
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_CPU_FREQ_PSS=y
+CONFIG_ACPI_PROCESSOR_CSTATE=y
+CONFIG_ACPI_PROCESSOR_IDLE=y
+CONFIG_ACPI_CPPC_LIB=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_NUMA=y
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
+CONFIG_ACPI_TABLE_UPGRADE=y
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_HOTPLUG_IOAPIC=y
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_HED=y
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+CONFIG_ACPI_BGRT=y
+# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
+CONFIG_ACPI_NFIT=m
+CONFIG_HAVE_ACPI_APEI=y
+CONFIG_HAVE_ACPI_APEI_NMI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=m
+CONFIG_ACPI_APEI_ERST_DEBUG=m
+CONFIG_DPTF_POWER=m
+CONFIG_ACPI_WATCHDOG=y
+CONFIG_ACPI_EXTLOG=m
+CONFIG_PMIC_OPREGION=y
+CONFIG_CRC_PMIC_OPREGION=y
+CONFIG_XPOWER_PMIC_OPREGION=y
+CONFIG_BXT_WC_PMIC_OPREGION=y
+CONFIG_CHT_WC_PMIC_OPREGION=y
+CONFIG_CHT_DC_TI_PMIC_OPREGION=y
+CONFIG_ACPI_CONFIGFS=m
+CONFIG_TPS68470_PMIC_OPREGION=y
+CONFIG_X86_PM_TIMER=y
+CONFIG_SFI=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+
+#
+# CPU frequency scaling drivers
+#
+CONFIG_CPUFREQ_DT=m
+CONFIG_CPUFREQ_DT_PLATDEV=y
+CONFIG_X86_INTEL_PSTATE=y
+CONFIG_X86_PCC_CPUFREQ=m
+CONFIG_X86_ACPI_CPUFREQ=m
+CONFIG_X86_ACPI_CPUFREQ_CPB=y
+CONFIG_X86_POWERNOW_K8=m
+CONFIG_X86_AMD_FREQ_SENSITIVITY=m
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_P4_CLOCKMOD=m
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=m
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_XEN=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+CONFIG_PCIE_ECRC=y
+# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
+CONFIG_PCIE_DPC=y
+CONFIG_PCIE_PTM=y
+CONFIG_PCI_BUS_ADDR_T_64BIT=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+CONFIG_PCI_REALLOC_ENABLE_AUTO=y
+CONFIG_PCI_STUB=y
+CONFIG_XEN_PCIDEV_FRONTEND=m
+CONFIG_PCI_ATS=y
+CONFIG_PCI_LOCKLESS_CONFIG=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_PRI=y
+CONFIG_PCI_PASID=y
+CONFIG_PCI_LABEL=y
+CONFIG_PCI_HYPERV=m
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+CONFIG_HOTPLUG_PCI_CPCI=y
+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
+CONFIG_HOTPLUG_PCI_SHPC=m
+
+#
+# Cadence PCIe controllers support
+#
+CONFIG_PCIE_CADENCE=y
+CONFIG_PCIE_CADENCE_HOST=y
+CONFIG_PCIE_CADENCE_EP=y
+
+#
+# DesignWare PCI Core Support
+#
+# CONFIG_PCIE_DW_PLAT is not set
+
+#
+# PCI host controller drivers
+#
+CONFIG_VMD=m
+
+#
+# PCI Endpoint
+#
+CONFIG_PCI_ENDPOINT=y
+CONFIG_PCI_ENDPOINT_CONFIGFS=y
+# CONFIG_PCI_EPF_TEST is not set
+
+#
+# PCI switch controller drivers
+#
+CONFIG_PCI_SW_SWITCHTEC=m
+# CONFIG_ISA_BUS is not set
+CONFIG_ISA_DMA_API=y
+CONFIG_AMD_NB=y
+CONFIG_PCCARD=m
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+CONFIG_PD6729=m
+CONFIG_I82092=m
+CONFIG_PCCARD_NONSTATIC=y
+CONFIG_RAPIDIO=m
+CONFIG_RAPIDIO_TSI721=m
+CONFIG_RAPIDIO_DISC_TIMEOUT=30
+CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+CONFIG_RAPIDIO_DMA_ENGINE=y
+# CONFIG_RAPIDIO_DEBUG is not set
+CONFIG_RAPIDIO_ENUM_BASIC=m
+CONFIG_RAPIDIO_CHMAN=m
+CONFIG_RAPIDIO_MPORT_CDEV=m
+
+#
+# RapidIO Switch drivers
+#
+CONFIG_RAPIDIO_TSI57X=m
+CONFIG_RAPIDIO_CPS_XX=m
+CONFIG_RAPIDIO_TSI568=m
+CONFIG_RAPIDIO_CPS_GEN2=m
+CONFIG_RAPIDIO_RXS_GEN3=m
+# CONFIG_X86_SYSFB is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_COMPAT_BINFMT_ELF=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_IA32_EMULATION=y
+# CONFIG_IA32_AOUT is not set
+# CONFIG_X86_X32 is not set
+CONFIG_COMPAT_32=y
+CONFIG_COMPAT=y
+CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_X86_DEV_DMA_OPS=y
+CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_TLS=m
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=m
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
+CONFIG_NET_IPGRE=m
+# CONFIG_NET_IPGRE_BROADCAST is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_UDP_TUNNEL=m
+CONFIG_NET_FOU=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=m
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y
+CONFIG_NETFILTER_FAMILY_ARP=y
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_COMMON=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NETFILTER_CONNCOUNT=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_NF_NAT_PROTO_DCCP=y
+CONFIG_NF_NAT_PROTO_UDPLITE=y
+CONFIG_NF_NAT_PROTO_SCTP=y
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_REDIRECT=m
+CONFIG_NETFILTER_SYNPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_REJECT_INET=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NF_DUP_NETDEV=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XTABLES=m
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
+
+#
+# Xtables targets
+#
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+
+#
+# Xtables matches
+#
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=15
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_FO=m
+CONFIG_IP_VS_OVF=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_FLOW_TABLE_IPV4=m
+CONFIG_NF_DUP_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
+CONFIG_NF_REJECT_IPV4=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_NAT_MASQUERADE_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_REJECT_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_FLOW_TABLE_IPV6=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_LOG_IPV6=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_MASQUERADE_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration
+#
+# CONFIG_IP_DCCP_CCID2_DEBUG is not set
+CONFIG_IP_DCCP_CCID3=y
+# CONFIG_IP_DCCP_CCID3_DEBUG is not set
+CONFIG_IP_DCCP_TFRC_LIB=y
+
+#
+# DCCP Kernel Hacking
+#
+# CONFIG_IP_DCCP_DEBUG is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_INET_SCTP_DIAG=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+# CONFIG_RDS_DEBUG is not set
+CONFIG_TIPC=m
+CONFIG_TIPC_MEDIA_IB=y
+CONFIG_TIPC_MEDIA_UDP=y
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_L2TP=m
+# CONFIG_L2TP_DEBUGFS is not set
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_MRP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_NET_DSA=m
+# CONFIG_NET_DSA_LEGACY is not set
+CONFIG_NET_DSA_TAG_BRCM=y
+CONFIG_NET_DSA_TAG_BRCM_PREPEND=y
+CONFIG_NET_DSA_TAG_DSA=y
+CONFIG_NET_DSA_TAG_EDSA=y
+CONFIG_NET_DSA_TAG_KSZ=y
+CONFIG_NET_DSA_TAG_LAN9303=y
+CONFIG_NET_DSA_TAG_MTK=y
+CONFIG_NET_DSA_TAG_QCA=y
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+CONFIG_VLAN_8021Q_MVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+CONFIG_LLC2=m
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+CONFIG_PHONET=m
+CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_DEBUGFS is not set
+CONFIG_6LOWPAN_NHC=m
+CONFIG_6LOWPAN_NHC_DEST=m
+CONFIG_6LOWPAN_NHC_FRAGMENT=m
+CONFIG_6LOWPAN_NHC_HOP=m
+CONFIG_6LOWPAN_NHC_IPV6=m
+CONFIG_6LOWPAN_NHC_MOBILITY=m
+CONFIG_6LOWPAN_NHC_ROUTING=m
+CONFIG_6LOWPAN_NHC_UDP=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
+CONFIG_IEEE802154_SOCKET=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
+CONFIG_DEFAULT_FQ_CODEL=y
+# CONFIG_DEFAULT_SFQ is not set
+# CONFIG_DEFAULT_PFIFO_FAST is not set
+CONFIG_DEFAULT_NET_SCH="fq_codel"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_CANID=m
+CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_CONNMARK=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_SCH_FIFO=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BATMAN_V=y
+CONFIG_BATMAN_ADV_BLA=y
+CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_BATMAN_ADV_MCAST=y
+# CONFIG_BATMAN_ADV_DEBUGFS is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_OPENVSWITCH_GENEVE=m
+CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VMWARE_VMCI_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
+CONFIG_HYPERV_VSOCKETS=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
+CONFIG_HSR=m
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_NCSI=y
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_DROP_MONITOR=m
+CONFIG_HAMRADIO=y
+
+#
+# Packet Radio protocols
+#
+CONFIG_AX25=m
+CONFIG_AX25_DAMA_SLAVE=y
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+
+#
+# AX.25 network device drivers
+#
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_BAYCOM_PAR=m
+CONFIG_YAM=m
+CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_GW=m
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_VXCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_LEDS=y
+CONFIG_CAN_GRCAN=m
+CONFIG_CAN_JANZ_ICAN3=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_C_CAN_PCI=m
+CONFIG_CAN_CC770=m
+# CONFIG_CAN_CC770_ISA is not set
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_IFI_CANFD=m
+CONFIG_CAN_M_CAN=m
+CONFIG_CAN_PEAK_PCIEFD=m
+CONFIG_CAN_SJA1000=m
+# CONFIG_CAN_SJA1000_ISA is not set
+CONFIG_CAN_SJA1000_PLATFORM=m
+# CONFIG_CAN_EMS_PCMCIA is not set
+CONFIG_CAN_EMS_PCI=m
+CONFIG_CAN_PEAK_PCMCIA=m
+CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PEAK_PCIEC=y
+CONFIG_CAN_KVASER_PCI=m
+CONFIG_CAN_PLX_PCI=m
+CONFIG_CAN_SOFTING=m
+CONFIG_CAN_SOFTING_CS=m
+
+#
+# CAN SPI interfaces
+#
+CONFIG_CAN_HI311X=m
+CONFIG_CAN_MCP251X=m
+
+#
+# CAN USB interfaces
+#
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_ESD_USB2=m
+CONFIG_CAN_GS_USB=m
+CONFIG_CAN_KVASER_USB=m
+CONFIG_CAN_PEAK_USB=m
+CONFIG_CAN_8DEV_USB=m
+CONFIG_CAN_MCBA_USB=m
+# CONFIG_CAN_DEBUG_DEVICES is not set
+CONFIG_BT=m
+CONFIG_BT_BREDR=y
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+CONFIG_BT_6LOWPAN=m
+CONFIG_BT_LEDS=y
+# CONFIG_BT_SELFTEST is not set
+# CONFIG_BT_DEBUGFS is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_INTEL=m
+CONFIG_BT_BCM=m
+CONFIG_BT_RTL=m
+CONFIG_BT_QCA=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_BCM=y
+CONFIG_BT_HCIBTUSB_RTL=y
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_SERDEV=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_NOKIA=m
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIUART_AG6XX=y
+CONFIG_BT_HCIUART_MRVL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIBTUART=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_ATH3K=m
+CONFIG_BT_WILINK=m
+CONFIG_AF_RXRPC=m
+CONFIG_AF_RXRPC_IPV6=y
+# CONFIG_AF_RXRPC_INJECT_LOSS is not set
+# CONFIG_AF_RXRPC_DEBUG is not set
+CONFIG_RXKAD=y
+CONFIG_AF_KCM=m
+CONFIG_STREAM_PARSER=y
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
+CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_CRDA_SUPPORT=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_CFG80211_WEXT_EXPORT=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_MINSTREL_VHT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=m
+CONFIG_NET_9P=m
+CONFIG_NET_9P_VIRTIO=m
+CONFIG_NET_9P_XEN=m
+CONFIG_NET_9P_RDMA=m
+# CONFIG_NET_9P_DEBUG is not set
+CONFIG_CAIF=m
+# CONFIG_CAIF_DEBUG is not set
+CONFIG_CAIF_NETDEV=m
+CONFIG_CAIF_USB=m
+CONFIG_CEPH_LIB=m
+CONFIG_CEPH_LIB_PRETTYDEBUG=y
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+CONFIG_NFC=m
+CONFIG_NFC_DIGITAL=m
+CONFIG_NFC_NCI=m
+CONFIG_NFC_NCI_SPI=m
+CONFIG_NFC_NCI_UART=m
+CONFIG_NFC_HCI=m
+CONFIG_NFC_SHDLC=y
+
+#
+# Near Field Communication (NFC) devices
+#
+CONFIG_NFC_TRF7970A=m
+# CONFIG_NFC_MEI_PHY is not set
+CONFIG_NFC_SIM=m
+CONFIG_NFC_PORT100=m
+CONFIG_NFC_FDP=m
+CONFIG_NFC_FDP_I2C=m
+CONFIG_NFC_PN544=m
+CONFIG_NFC_PN544_I2C=m
+# CONFIG_NFC_PN544_MEI is not set
+CONFIG_NFC_PN533=m
+CONFIG_NFC_PN533_USB=m
+CONFIG_NFC_PN533_I2C=m
+CONFIG_NFC_MICROREAD=m
+CONFIG_NFC_MICROREAD_I2C=m
+# CONFIG_NFC_MICROREAD_MEI is not set
+CONFIG_NFC_MRVL=m
+CONFIG_NFC_MRVL_USB=m
+CONFIG_NFC_MRVL_UART=m
+CONFIG_NFC_MRVL_I2C=m
+CONFIG_NFC_MRVL_SPI=m
+CONFIG_NFC_ST21NFCA=m
+CONFIG_NFC_ST21NFCA_I2C=m
+CONFIG_NFC_ST_NCI=m
+CONFIG_NFC_ST_NCI_I2C=m
+CONFIG_NFC_ST_NCI_SPI=m
+CONFIG_NFC_NXP_NCI=m
+CONFIG_NFC_NXP_NCI_I2C=m
+CONFIG_NFC_S3FWRN5=m
+CONFIG_NFC_S3FWRN5_I2C=m
+CONFIG_NFC_ST95HF=m
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
+CONFIG_LWTUNNEL=y
+CONFIG_LWTUNNEL_BPF=y
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_NET_DEVLINK=m
+CONFIG_MAY_USE_DEVLINK=m
+CONFIG_HAVE_EBPF_JIT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_SYS_HYPERVISOR=y
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_SPMI=m
+CONFIG_REGMAP_W1=m
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_SOUNDWIRE=m
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+
+#
+# Bus devices
+#
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=m
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+CONFIG_MTD_CMDLINE_PARTS=m
+CONFIG_MTD_OF_PARTS=m
+CONFIG_MTD_AR7_PARTS=m
+
+#
+# Partition parsers
+#
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+CONFIG_RFD_FTL=m
+CONFIG_SSFDC=m
+CONFIG_SM_FTL=m
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_SWAP=m
+CONFIG_MTD_PARTITIONED_MASTER=y
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=m
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_PHYSMAP_OF=m
+CONFIG_MTD_PHYSMAP_OF_VERSATILE=y
+CONFIG_MTD_PHYSMAP_OF_GEMINI=y
+CONFIG_MTD_SBC_GXX=m
+CONFIG_MTD_AMD76XROM=m
+CONFIG_MTD_ICHXROM=m
+CONFIG_MTD_ESB2ROM=m
+CONFIG_MTD_CK804XROM=m
+CONFIG_MTD_SCB2_FLASH=m
+CONFIG_MTD_NETtel=m
+CONFIG_MTD_L440GX=m
+CONFIG_MTD_PCI=m
+CONFIG_MTD_PCMCIA=m
+# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
+CONFIG_MTD_GPIO_ADDR=m
+CONFIG_MTD_INTEL_VR_NOR=m
+CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_LATCH_ADDR=m
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_PMC551=m
+# CONFIG_MTD_PMC551_BUGFIX is not set
+# CONFIG_MTD_PMC551_DEBUG is not set
+CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_MTD_M25P80=m
+CONFIG_MTD_MCHP23K256=m
+CONFIG_MTD_SST25L=m
+CONFIG_MTD_SLRAM=m
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+CONFIG_MTD_BLOCK2MTD=m
+
+#
+# Disk-On-Chip Device Drivers
+#
+CONFIG_MTD_DOCG3=m
+CONFIG_BCH_CONST_M=14
+CONFIG_BCH_CONST_T=4
+CONFIG_MTD_NAND_ECC=m
+CONFIG_MTD_NAND_ECC_SMC=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_BCH=m
+CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_SM_COMMON=m
+CONFIG_MTD_NAND_DENALI=m
+CONFIG_MTD_NAND_DENALI_PCI=m
+CONFIG_MTD_NAND_DENALI_DT=m
+CONFIG_MTD_NAND_GPIO=m
+# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
+CONFIG_MTD_NAND_RICOH=m
+CONFIG_MTD_NAND_DISKONCHIP=m
+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
+CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
+CONFIG_MTD_NAND_DOCG4=m
+CONFIG_MTD_NAND_CAFE=m
+CONFIG_MTD_NAND_NANDSIM=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_ONENAND=m
+# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
+CONFIG_MTD_ONENAND_GENERIC=m
+CONFIG_MTD_ONENAND_OTP=y
+CONFIG_MTD_ONENAND_2X_PROGRAM=y
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+CONFIG_MTD_LPDDR=m
+CONFIG_MTD_QINFO_PROBE=m
+CONFIG_MTD_SPI_NOR=m
+CONFIG_MTD_MT81xx_NOR=m
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+CONFIG_SPI_INTEL_SPI=m
+CONFIG_SPI_INTEL_SPI_PCI=m
+CONFIG_SPI_INTEL_SPI_PLATFORM=m
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_KOBJ=y
+CONFIG_OF_DYNAMIC=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_NET=y
+CONFIG_OF_MDIO=m
+CONFIG_OF_RESOLVE=y
+CONFIG_OF_OVERLAY=y
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_PARPORT_PC_SUPERIO=y
+CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_GSC is not set
+CONFIG_PARPORT_AX88796=m
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+CONFIG_BLK_DEV_FD=m
+CONFIG_CDROM=m
+# CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
+CONFIG_ZRAM=m
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_BLK_DEV_DAC960=m
+CONFIG_BLK_DEV_UMEM=m
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
+# CONFIG_DRBD_FAULT_INJECTION is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_SKD=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+CONFIG_ATA_OVER_ETH=m
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_XEN_BLKDEV_BACKEND=m
+CONFIG_VIRTIO_BLK=m
+# CONFIG_VIRTIO_BLK_SCSI is not set
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_RSXX=m
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=y
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TARGET=m
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+CONFIG_AD525X_DPOT=m
+CONFIG_AD525X_DPOT_I2C=m
+CONFIG_AD525X_DPOT_SPI=m
+# CONFIG_DUMMY_IRQ is not set
+CONFIG_IBM_ASM=m
+CONFIG_PHANTOM=m
+CONFIG_SGI_IOC4=m
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+CONFIG_ICS932S401=m
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_HP_ILO=m
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+CONFIG_HMC6352=m
+CONFIG_DS1682=m
+CONFIG_VMWARE_BALLOON=m
+CONFIG_USB_SWITCH_FSA9480=m
+CONFIG_LATTICE_ECP3_CONFIG=m
+# CONFIG_SRAM is not set
+CONFIG_PCI_ENDPOINT_TEST=m
+CONFIG_MISC_RTSX=m
+CONFIG_C2PORT=m
+CONFIG_C2PORT_DURAMAR_2150=m
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+CONFIG_EEPROM_IDT_89HPESX=m
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+CONFIG_TI_ST=m
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_ALTERA_STAPL=m
+# CONFIG_INTEL_MEI is not set
+# CONFIG_INTEL_MEI_ME is not set
+# CONFIG_INTEL_MEI_TXE is not set
+CONFIG_VMWARE_VMCI=m
+
+#
+# Intel MIC & related support
+#
+
+#
+# Intel MIC Bus Driver
+#
+CONFIG_INTEL_MIC_BUS=m
+
+#
+# SCIF Bus Driver
+#
+CONFIG_SCIF_BUS=m
+
+#
+# VOP Bus Driver
+#
+CONFIG_VOP_BUS=m
+
+#
+# Intel MIC Host Driver
+#
+CONFIG_INTEL_MIC_HOST=m
+
+#
+# Intel MIC Card Driver
+#
+CONFIG_INTEL_MIC_CARD=m
+
+#
+# SCIF Driver
+#
+CONFIG_SCIF=m
+
+#
+# Intel MIC Coprocessor State Management (COSM) Drivers
+#
+CONFIG_MIC_COSM=m
+
+#
+# VOP Driver
+#
+CONFIG_VOP=m
+CONFIG_VHOST_RING=m
+CONFIG_GENWQE=m
+CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
+CONFIG_ECHO=m
+# CONFIG_CXL_BASE is not set
+# CONFIG_CXL_AFU_DRIVER_OPS is not set
+# CONFIG_CXL_LIB is not set
+# CONFIG_OCXL_BASE is not set
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_HPSA=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_3W_SAS=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC79XX=m
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC94XX=m
+CONFIG_AIC94XX_DEBUG=y
+CONFIG_SCSI_MVSAS=m
+CONFIG_SCSI_MVSAS_DEBUG=y
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=m
+CONFIG_SCSI_DPT_I2O=m
+CONFIG_SCSI_ADVANSYS=m
+CONFIG_SCSI_ARCMSR=m
+CONFIG_SCSI_ESAS2R=m
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=m
+CONFIG_MEGARAID_MAILBOX=m
+CONFIG_MEGARAID_LEGACY=m
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PCI=m
+# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set
+CONFIG_SCSI_HPTIOP=m
+CONFIG_SCSI_BUSLOGIC=m
+CONFIG_SCSI_FLASHPOINT=y
+CONFIG_VMWARE_PVSCSI=m
+CONFIG_XEN_SCSI_FRONTEND=m
+CONFIG_HYPERV_STORAGE=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_FCOE_FNIC=m
+CONFIG_SCSI_SNIC=m
+# CONFIG_SCSI_SNIC_DEBUG_FS is not set
+CONFIG_SCSI_DMX3191D=m
+CONFIG_SCSI_EATA=m
+CONFIG_SCSI_EATA_TAGGED_QUEUE=y
+CONFIG_SCSI_EATA_LINKED_COMMANDS=y
+CONFIG_SCSI_EATA_MAX_TAGS=16
+CONFIG_SCSI_FUTURE_DOMAIN=m
+CONFIG_SCSI_GDTH=m
+CONFIG_SCSI_ISCI=m
+CONFIG_SCSI_IPS=m
+CONFIG_SCSI_INITIO=m
+CONFIG_SCSI_INIA100=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+# CONFIG_SCSI_IZIP_EPP16 is not set
+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
+CONFIG_SCSI_STEX=m
+CONFIG_SCSI_SYM53C8XX_2=m
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+CONFIG_SCSI_SYM53C8XX_MMIO=y
+CONFIG_SCSI_IPR=m
+CONFIG_SCSI_IPR_TRACE=y
+CONFIG_SCSI_IPR_DUMP=y
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+# CONFIG_TCM_QLA2XXX_DEBUG is not set
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_QEDI=m
+CONFIG_QEDF=m
+CONFIG_SCSI_LPFC=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+CONFIG_SCSI_DC395x=m
+CONFIG_SCSI_AM53C974=m
+CONFIG_SCSI_WD719X=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_PMCRAID=m
+CONFIG_SCSI_PM8001=m
+CONFIG_SCSI_BFA_FC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_LOWLEVEL_PCMCIA=y
+CONFIG_PCMCIA_AHA152X=m
+CONFIG_PCMCIA_FDOMAIN=m
+CONFIG_PCMCIA_QLOGIC=m
+CONFIG_PCMCIA_SYM53C500=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_SCSI_OSD_DPRINT_SENSE=0
+# CONFIG_SCSI_OSD_DEBUG is not set
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_ACPI=y
+CONFIG_SATA_ZPODD=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=3
+CONFIG_SATA_AHCI_PLATFORM=m
+CONFIG_AHCI_CEVA=m
+CONFIG_AHCI_QORIQ=m
+CONFIG_SATA_INIC162X=m
+CONFIG_SATA_ACARD_AHCI=m
+CONFIG_SATA_SIL24=m
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+CONFIG_PDC_ADMA=m
+CONFIG_SATA_QSTOR=m
+CONFIG_SATA_SX4=m
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+CONFIG_SATA_DWC=m
+# CONFIG_SATA_DWC_OLD_DMA is not set
+# CONFIG_SATA_DWC_DEBUG is not set
+CONFIG_SATA_MV=m
+CONFIG_SATA_NV=m
+CONFIG_SATA_PROMISE=m
+CONFIG_SATA_SIL=m
+CONFIG_SATA_SIS=m
+CONFIG_SATA_SVW=m
+CONFIG_SATA_ULI=m
+CONFIG_SATA_VIA=m
+CONFIG_SATA_VITESSE=m
+
+#
+# PATA SFF controllers with BMDMA
+#
+CONFIG_PATA_ALI=m
+CONFIG_PATA_AMD=m
+CONFIG_PATA_ARTOP=m
+CONFIG_PATA_ATIIXP=m
+CONFIG_PATA_ATP867X=m
+CONFIG_PATA_CMD64X=m
+CONFIG_PATA_CYPRESS=m
+CONFIG_PATA_EFAR=m
+CONFIG_PATA_HPT366=m
+CONFIG_PATA_HPT37X=m
+CONFIG_PATA_HPT3X2N=m
+CONFIG_PATA_HPT3X3=m
+CONFIG_PATA_HPT3X3_DMA=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_IT821X=m
+CONFIG_PATA_JMICRON=m
+CONFIG_PATA_MARVELL=m
+CONFIG_PATA_NETCELL=m
+CONFIG_PATA_NINJA32=m
+CONFIG_PATA_NS87415=m
+CONFIG_PATA_OLDPIIX=m
+CONFIG_PATA_OPTIDMA=m
+CONFIG_PATA_PDC2027X=m
+CONFIG_PATA_PDC_OLD=m
+CONFIG_PATA_RADISYS=m
+CONFIG_PATA_RDC=m
+CONFIG_PATA_SCH=m
+CONFIG_PATA_SERVERWORKS=m
+CONFIG_PATA_SIL680=m
+CONFIG_PATA_SIS=m
+CONFIG_PATA_TOSHIBA=m
+CONFIG_PATA_TRIFLEX=m
+CONFIG_PATA_VIA=m
+CONFIG_PATA_WINBOND=m
+
+#
+# PIO-only SFF controllers
+#
+CONFIG_PATA_CMD640_PCI=m
+CONFIG_PATA_MPIIX=m
+CONFIG_PATA_NS87410=m
+CONFIG_PATA_OPTI=m
+CONFIG_PATA_PCMCIA=m
+# CONFIG_PATA_PLATFORM is not set
+CONFIG_PATA_RZ1000=m
+
+#
+# Generic fallback / legacy drivers
+#
+CONFIG_PATA_ACPI=m
+CONFIG_ATA_GENERIC=m
+CONFIG_PATA_LEGACY=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_MQ_DEFAULT is not set
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_ERA=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_TCM_FC=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+CONFIG_SBP_TARGET=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_FC=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+# CONFIG_FUSION_LOGGING is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+CONFIG_FIREWIRE_NOSY=m
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=m
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+CONFIG_GTP=m
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_NTB_NETDEV=m
+CONFIG_RIONET=m
+CONFIG_RIONET_TX_SIZE=128
+CONFIG_RIONET_RX_SIZE=128
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+CONFIG_SUNGEM_PHY=m
+# CONFIG_ARCNET is not set
+CONFIG_ATM_DRIVERS=y
+# CONFIG_ATM_DUMMY is not set
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+# CONFIG_ATM_ENI_DEBUG is not set
+# CONFIG_ATM_ENI_TUNE_BURST is not set
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+# CONFIG_ATM_ZATM_DEBUG is not set
+CONFIG_ATM_NICSTAR=m
+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
+CONFIG_ATM_IDT77252=m
+# CONFIG_ATM_IDT77252_DEBUG is not set
+# CONFIG_ATM_IDT77252_RCV_ALL is not set
+CONFIG_ATM_IDT77252_USE_SUNI=y
+CONFIG_ATM_AMBASSADOR=m
+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
+CONFIG_ATM_HORIZON=m
+# CONFIG_ATM_HORIZON_DEBUG is not set
+CONFIG_ATM_IA=m
+# CONFIG_ATM_IA_DEBUG is not set
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_FORE200E_USE_TASKLET=y
+CONFIG_ATM_FORE200E_TX_RETRY=16
+CONFIG_ATM_FORE200E_DEBUG=0
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_ATM_SOLOS=m
+
+#
+# CAIF transport drivers
+#
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_SPI_SLAVE=m
+CONFIG_CAIF_SPI_SYNC=y
+CONFIG_CAIF_HSI=m
+CONFIG_CAIF_VIRTIO=m
+
+#
+# Distributed Switch Architecture drivers
+#
+CONFIG_B53=m
+# CONFIG_B53_SPI_DRIVER is not set
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
+CONFIG_NET_DSA_LOOP=m
+CONFIG_NET_DSA_MT7530=m
+CONFIG_MICROCHIP_KSZ=m
+CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m
+CONFIG_NET_DSA_MV88E6XXX=m
+CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
+CONFIG_NET_DSA_QCA8K=m
+CONFIG_NET_DSA_SMSC_LAN9303=m
+CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
+CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_NET_VENDOR_ADAPTEC=y
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_NET_VENDOR_AGERE=y
+CONFIG_ET131X=m
+CONFIG_NET_VENDOR_ALACRITECH=y
+CONFIG_SLICOSS=m
+CONFIG_NET_VENDOR_ALTEON=y
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_ALTERA_TSE=m
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+CONFIG_NET_VENDOR_AMD=y
+CONFIG_AMD8111_ETH=m
+CONFIG_PCNET32=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_AMD_XGBE=m
+CONFIG_AMD_XGBE_DCB=y
+CONFIG_AMD_XGBE_HAVE_ECC=y
+CONFIG_NET_VENDOR_AQUANTIA=y
+CONFIG_AQTION=m
+CONFIG_NET_VENDOR_ARC=y
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_ATL2=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+CONFIG_NET_VENDOR_AURORA=y
+CONFIG_AURORA_NB8800=m
+CONFIG_NET_CADENCE=y
+CONFIG_MACB=m
+CONFIG_MACB_USE_HWSTAMP=y
+CONFIG_MACB_PCI=m
+CONFIG_NET_VENDOR_BROADCOM=y
+CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
+CONFIG_BCMGENET=m
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+CONFIG_SYSTEMPORT=m
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_NET_VENDOR_BROCADE=y
+CONFIG_BNA=m
+CONFIG_NET_VENDOR_CAVIUM=y
+CONFIG_THUNDER_NIC_PF=m
+CONFIG_THUNDER_NIC_VF=m
+CONFIG_THUNDER_NIC_BGX=m
+CONFIG_THUNDER_NIC_RGX=m
+CONFIG_CAVIUM_PTP=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+CONFIG_CHELSIO_T4_DCB=y
+CONFIG_CHELSIO_T4_FCOE=y
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_NET_VENDOR_CISCO=y
+CONFIG_ENIC=m
+CONFIG_NET_VENDOR_CORTINA=y
+CONFIG_GEMINI_ETHERNET=m
+CONFIG_CX_ECAT=m
+CONFIG_DNET=m
+CONFIG_NET_VENDOR_DEC=y
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_DE2104X_DSL=0
+CONFIG_TULIP=m
+CONFIG_TULIP_MWI=y
+CONFIG_TULIP_MMIO=y
+CONFIG_TULIP_NAPI=y
+CONFIG_TULIP_NAPI_HW_MITIGATION=y
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+CONFIG_DM9102=m
+CONFIG_ULI526X=m
+CONFIG_PCMCIA_XIRCOM=m
+CONFIG_NET_VENDOR_DLINK=y
+CONFIG_DL2K=m
+CONFIG_SUNDANCE=m
+# CONFIG_SUNDANCE_MMIO is not set
+CONFIG_NET_VENDOR_EMULEX=y
+CONFIG_BE2NET=m
+CONFIG_BE2NET_HWMON=y
+CONFIG_NET_VENDOR_EZCHIP=y
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m
+CONFIG_NET_VENDOR_EXAR=y
+CONFIG_S2IO=m
+CONFIG_VXGE=m
+# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
+CONFIG_NET_VENDOR_FUJITSU=y
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_NET_VENDOR_HP=y
+CONFIG_HP100=m
+CONFIG_NET_VENDOR_HUAWEI=y
+CONFIG_HINIC=m
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_E1000E_HWTS=y
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGB_DCA=y
+CONFIG_IGBVF=m
+CONFIG_IXGB=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCA=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBEVF=m
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_I40EVF=m
+CONFIG_FM10K=m
+CONFIG_NET_VENDOR_I825XX=y
+CONFIG_JME=m
+CONFIG_NET_VENDOR_MARVELL=y
+CONFIG_MVMDIO=m
+CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
+CONFIG_SKGE_GENESIS=y
+CONFIG_SKY2=m
+# CONFIG_SKY2_DEBUG is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+CONFIG_MLX4_CORE_GEN2=y
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_ACCEL=y
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_CORE_EN_DCB=y
+CONFIG_MLX5_CORE_IPOIB=y
+CONFIG_MLX5_EN_IPSEC=y
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SWITCHIB=m
+CONFIG_MLXSW_SWITCHX2=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+CONFIG_NET_VENDOR_MICREL=y
+CONFIG_KS8842=m
+CONFIG_KS8851=m
+CONFIG_KS8851_MLL=m
+CONFIG_KSZ884X_PCI=m
+CONFIG_NET_VENDOR_MICROCHIP=y
+CONFIG_ENC28J60=m
+# CONFIG_ENC28J60_WRITEVERIFY is not set
+CONFIG_ENCX24J600=m
+CONFIG_NET_VENDOR_MYRI=y
+CONFIG_MYRI10GE=m
+CONFIG_MYRI10GE_DCA=y
+CONFIG_FEALNX=m
+CONFIG_NET_VENDOR_NATSEMI=y
+CONFIG_NATSEMI=m
+CONFIG_NS83820=m
+CONFIG_NET_VENDOR_NETRONOME=y
+CONFIG_NFP=m
+CONFIG_NFP_APP_FLOWER=y
+# CONFIG_NFP_DEBUG is not set
+CONFIG_NET_VENDOR_8390=y
+CONFIG_PCMCIA_AXNET=m
+CONFIG_NE2K_PCI=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_NET_VENDOR_NVIDIA=y
+CONFIG_FORCEDETH=m
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_ETHOC=m
+CONFIG_NET_PACKET_ENGINE=y
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLCNIC_SRIOV=y
+CONFIG_QLCNIC_DCB=y
+CONFIG_QLCNIC_HWMON=y
+CONFIG_QLGE=m
+CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QED_LL2=y
+CONFIG_QED_SRIOV=y
+CONFIG_QEDE=m
+CONFIG_QED_RDMA=y
+CONFIG_QED_ISCSI=y
+CONFIG_QED_FCOE=y
+CONFIG_QED_OOO=y
+CONFIG_NET_VENDOR_QUALCOMM=y
+CONFIG_QCA7000=m
+CONFIG_QCA7000_SPI=m
+CONFIG_QCA7000_UART=m
+CONFIG_QCOM_EMAC=m
+CONFIG_RMNET=m
+CONFIG_NET_VENDOR_REALTEK=y
+CONFIG_ATP=m
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+CONFIG_8139TOO_TUNE_TWISTER=y
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+CONFIG_NET_VENDOR_RENESAS=y
+CONFIG_NET_VENDOR_RDC=y
+CONFIG_R6040=m
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_ROCKER=m
+CONFIG_NET_VENDOR_SAMSUNG=y
+CONFIG_SXGBE_ETH=m
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+CONFIG_SC92031=m
+CONFIG_NET_VENDOR_SIS=y
+CONFIG_SIS900=m
+CONFIG_SIS190=m
+CONFIG_NET_VENDOR_SOLARFLARE=y
+CONFIG_SFC=m
+CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
+CONFIG_SFC_MCDI_LOGGING=y
+CONFIG_SFC_FALCON=m
+CONFIG_SFC_FALCON_MTD=y
+CONFIG_NET_VENDOR_SMSC=y
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_EPIC100=m
+CONFIG_SMSC911X=m
+# CONFIG_SMSC911X_ARCH_HOOKS is not set
+CONFIG_SMSC9420=m
+CONFIG_NET_VENDOR_SOCIONEXT=y
+CONFIG_NET_VENDOR_STMICRO=y
+CONFIG_STMMAC_ETH=m
+CONFIG_STMMAC_PLATFORM=m
+CONFIG_DWMAC_DWC_QOS_ETH=m
+CONFIG_DWMAC_GENERIC=m
+CONFIG_STMMAC_PCI=m
+CONFIG_NET_VENDOR_SUN=y
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
+CONFIG_NIU=m
+CONFIG_NET_VENDOR_TEHUTI=y
+CONFIG_TEHUTI=m
+CONFIG_NET_VENDOR_TI=y
+CONFIG_TI_CPSW_ALE=m
+CONFIG_TLAN=m
+CONFIG_NET_VENDOR_VIA=y
+CONFIG_VIA_RHINE=m
+CONFIG_VIA_RHINE_MMIO=y
+CONFIG_VIA_VELOCITY=m
+CONFIG_NET_VENDOR_WIZNET=y
+CONFIG_WIZNET_W5100=m
+CONFIG_WIZNET_W5300=m
+# CONFIG_WIZNET_BUS_DIRECT is not set
+# CONFIG_WIZNET_BUS_INDIRECT is not set
+CONFIG_WIZNET_BUS_ANY=y
+CONFIG_WIZNET_W5100_SPI=m
+CONFIG_NET_VENDOR_XIRCOM=y
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_NET_VENDOR_SYNOPSYS=y
+CONFIG_DWC_XLGMAC=m
+CONFIG_DWC_XLGMAC_PCI=m
+CONFIG_FDDI=m
+CONFIG_DEFXX=m
+CONFIG_DEFXX_MMIO=y
+CONFIG_SKFP=m
+# CONFIG_HIPPI is not set
+CONFIG_NET_SB1000=m
+CONFIG_MDIO_DEVICE=m
+CONFIG_MDIO_BUS=m
+CONFIG_MDIO_BCM_UNIMAC=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_BUS_MUX=m
+CONFIG_MDIO_BUS_MUX_GPIO=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=m
+CONFIG_MDIO_CAVIUM=m
+CONFIG_MDIO_GPIO=m
+CONFIG_MDIO_HISI_FEMAC=m
+CONFIG_MDIO_OCTEON=m
+CONFIG_MDIO_THUNDER=m
+CONFIG_PHYLIB=m
+CONFIG_SWPHY=y
+CONFIG_LED_TRIGGER_PHY=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_AMD_PHY=m
+CONFIG_AQUANTIA_PHY=m
+CONFIG_AT803X_PHY=m
+CONFIG_BCM7XXX_PHY=m
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+CONFIG_FIXED_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MICREL_PHY=m
+CONFIG_MICROCHIP_PHY=m
+CONFIG_MICROSEMI_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+CONFIG_MICREL_KS8995MA=m
+CONFIG_PLIP=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+
+#
+# Host-side USB support is needed for USB Network Adapter support
+#
+CONFIG_USB_NET_DRIVERS=m
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9700=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_CDC_PHONET=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+CONFIG_WLAN=y
+# CONFIG_WIRELESS_WDS is not set
+CONFIG_WLAN_VENDOR_ADMTEK=y
+CONFIG_ADM8211=m
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH_USER_REGD=y
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+# CONFIG_ATH5K_TRACER is not set
+CONFIG_ATH5K_PCI=y
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+# CONFIG_ATH9K_DEBUGFS is not set
+CONFIG_ATH9K_DYNACK=y
+CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+CONFIG_ATH9K_CHANNEL_CONTEXT=y
+CONFIG_ATH9K_PCOEM=y
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_ATH9K_HWRNG=y
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+# CONFIG_CARL9170_HWRNG is not set
+CONFIG_ATH6KL=m
+CONFIG_ATH6KL_SDIO=m
+CONFIG_ATH6KL_USB=m
+# CONFIG_ATH6KL_DEBUG is not set
+# CONFIG_ATH6KL_TRACING is not set
+CONFIG_AR5523=m
+CONFIG_WIL6210=m
+CONFIG_WIL6210_ISR_COR=y
+CONFIG_WIL6210_TRACING=y
+# CONFIG_WIL6210_DEBUGFS is not set
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_AHB=y
+CONFIG_ATH10K_SDIO=m
+CONFIG_ATH10K_USB=m
+# CONFIG_ATH10K_DEBUG is not set
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+CONFIG_WCN36XX=m
+# CONFIG_WCN36XX_DEBUGFS is not set
+CONFIG_WLAN_VENDOR_ATMEL=y
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PCMCIA_ATMEL=m
+CONFIG_AT76C50X_USB=m
+CONFIG_WLAN_VENDOR_BROADCOM=y
+CONFIG_B43=m
+CONFIG_B43_BCMA=y
+CONFIG_B43_SSB=y
+CONFIG_B43_BUSES_BCMA_AND_SSB=y
+# CONFIG_B43_BUSES_BCMA is not set
+# CONFIG_B43_BUSES_SSB is not set
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_SDIO=y
+CONFIG_B43_BCMA_PIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_G=y
+CONFIG_B43_PHY_N=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_PHY_HT=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+CONFIG_BRCMDBG=y
+CONFIG_WLAN_VENDOR_CISCO=y
+CONFIG_AIRO=m
+CONFIG_AIRO_CS=m
+CONFIG_WLAN_VENDOR_INTEL=y
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
+CONFIG_IWLEGACY=m
+CONFIG_IWL4965=m
+CONFIG_IWL3945=m
+
+#
+# iwl3945 / iwl4965 Debugging Options
+#
+# CONFIG_IWLEGACY_DEBUG is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+# CONFIG_IWLWIFI_BCAST_FILTERING is not set
+# CONFIG_IWLWIFI_PCIE_RTPM is not set
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
+CONFIG_WLAN_VENDOR_INTERSIL=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_HOSTAP_CS=m
+CONFIG_HERMES=m
+CONFIG_HERMES_PRISM=y
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_NORTEL_HERMES=m
+CONFIG_PCI_HERMES=m
+CONFIG_PCMCIA_HERMES=m
+CONFIG_PCMCIA_SPECTRUM=m
+CONFIG_ORINOCO_USB=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_PCI=m
+CONFIG_P54_SPI=m
+# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
+CONFIG_P54_LEDS=y
+CONFIG_PRISM54=m
+CONFIG_WLAN_VENDOR_MARVELL=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_CS=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_SPI=m
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_LIBERTAS_MESH=y
+CONFIG_LIBERTAS_THINFIRM=m
+# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
+CONFIG_LIBERTAS_THINFIRM_USB=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_MWIFIEX_USB=m
+CONFIG_MWL8K=m
+CONFIG_WLAN_VENDOR_MEDIATEK=y
+CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76x2E=m
+CONFIG_WLAN_VENDOR_RALINK=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT3573=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2800_LIB_MMIO=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WLAN_VENDOR_REALTEK=y
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_RTL_CARDS=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTLWIFI_PCI=m
+CONFIG_RTLWIFI_USB=m
+CONFIG_RTLWIFI_DEBUG=y
+CONFIG_RTL8192C_COMMON=m
+CONFIG_RTL8723_COMMON=m
+CONFIG_RTLBTCOEXIST=m
+CONFIG_RTL8XXXU=m
+CONFIG_RTL8XXXU_UNTESTED=y
+CONFIG_WLAN_VENDOR_RSI=y
+CONFIG_RSI_91X=m
+# CONFIG_RSI_DEBUGFS is not set
+CONFIG_RSI_SDIO=m
+CONFIG_RSI_USB=m
+CONFIG_WLAN_VENDOR_ST=y
+CONFIG_CW1200=m
+CONFIG_CW1200_WLAN_SDIO=m
+CONFIG_CW1200_WLAN_SPI=m
+CONFIG_WLAN_VENDOR_TI=y
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL12XX=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE=m
+CONFIG_WLCORE_SPI=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_WILINK_PLATFORM_DATA=y
+CONFIG_WLAN_VENDOR_ZYDAS=y
+CONFIG_USB_ZD1201=m
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_WLAN_VENDOR_QUANTENNA=y
+CONFIG_QTNFMAC=m
+CONFIG_QTNFMAC_PEARL_PCIE=m
+CONFIG_PCMCIA_RAYCS=m
+CONFIG_PCMCIA_WL3501=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+
+#
+# WiMAX Wireless Broadband devices
+#
+CONFIG_WIMAX_I2400M=m
+CONFIG_WIMAX_I2400M_USB=m
+CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
+# CONFIG_WAN is not set
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IEEE802154_FAKELB=m
+CONFIG_IEEE802154_AT86RF230=m
+# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set
+CONFIG_IEEE802154_MRF24J40=m
+CONFIG_IEEE802154_CC2520=m
+CONFIG_IEEE802154_ATUSB=m
+CONFIG_IEEE802154_ADF7242=m
+CONFIG_IEEE802154_CA8210=m
+# CONFIG_IEEE802154_CA8210_DEBUGFS is not set
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_XEN_NETDEV_BACKEND=m
+CONFIG_VMXNET3=m
+CONFIG_FUJITSU_ES=m
+CONFIG_THUNDERBOLT_NET=m
+CONFIG_HYPERV_NET=m
+CONFIG_NETDEVSIM=m
+CONFIG_ISDN=y
+CONFIG_ISDN_I4L=m
+CONFIG_ISDN_PPP=y
+CONFIG_ISDN_PPP_VJ=y
+CONFIG_ISDN_MPP=y
+CONFIG_IPPP_FILTER=y
+CONFIG_ISDN_PPP_BSDCOMP=m
+CONFIG_ISDN_AUDIO=y
+CONFIG_ISDN_TTY_FAX=y
+
+#
+# ISDN feature submodules
+#
+CONFIG_ISDN_DIVERSION=m
+
+#
+# ISDN4Linux hardware drivers
+#
+
+#
+# Passive cards
+#
+CONFIG_ISDN_DRV_HISAX=m
+
+#
+# D-channel protocol features
+#
+CONFIG_HISAX_EURO=y
+CONFIG_DE_AOC=y
+# CONFIG_HISAX_NO_SENDCOMPLETE is not set
+# CONFIG_HISAX_NO_LLC is not set
+# CONFIG_HISAX_NO_KEYPAD is not set
+CONFIG_HISAX_1TR6=y
+CONFIG_HISAX_NI1=y
+CONFIG_HISAX_MAX_CARDS=8
+
+#
+# HiSax supported cards
+#
+CONFIG_HISAX_16_3=y
+CONFIG_HISAX_TELESPCI=y
+CONFIG_HISAX_S0BOX=y
+CONFIG_HISAX_FRITZPCI=y
+CONFIG_HISAX_AVM_A1_PCMCIA=y
+CONFIG_HISAX_ELSA=y
+CONFIG_HISAX_DIEHLDIVA=y
+CONFIG_HISAX_SEDLBAUER=y
+CONFIG_HISAX_NETJET=y
+CONFIG_HISAX_NETJET_U=y
+CONFIG_HISAX_NICCY=y
+CONFIG_HISAX_BKM_A4T=y
+CONFIG_HISAX_SCT_QUADRO=y
+CONFIG_HISAX_GAZEL=y
+CONFIG_HISAX_HFC_PCI=y
+CONFIG_HISAX_W6692=y
+CONFIG_HISAX_HFC_SX=y
+CONFIG_HISAX_ENTERNOW_PCI=y
+# CONFIG_HISAX_DEBUG is not set
+
+#
+# HiSax PCMCIA card service modules
+#
+CONFIG_HISAX_SEDLBAUER_CS=m
+CONFIG_HISAX_ELSA_CS=m
+CONFIG_HISAX_AVM_A1_CS=m
+CONFIG_HISAX_TELES_CS=m
+
+#
+# HiSax sub driver modules
+#
+CONFIG_HISAX_ST5481=m
+CONFIG_HISAX_HFCUSB=m
+CONFIG_HISAX_HFC4S8S=m
+CONFIG_HISAX_FRITZ_PCIPNP=m
+CONFIG_ISDN_CAPI=m
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPIDRV=m
+# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set
+
+#
+# CAPI hardware drivers
+#
+CONFIG_CAPI_AVM=y
+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
+CONFIG_ISDN_DRV_AVMB1_C4=m
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DIVAS=m
+CONFIG_ISDN_DIVAS_BRIPCI=y
+CONFIG_ISDN_DIVAS_PRIPCI=y
+CONFIG_ISDN_DIVAS_DIVACAPI=m
+CONFIG_ISDN_DIVAS_USERIDI=m
+CONFIG_ISDN_DIVAS_MAINT=m
+CONFIG_ISDN_DRV_GIGASET=m
+CONFIG_GIGASET_CAPI=y
+# CONFIG_GIGASET_I4L is not set
+# CONFIG_GIGASET_DUMMYLL is not set
+CONFIG_GIGASET_BASE=m
+CONFIG_GIGASET_M105=m
+CONFIG_GIGASET_M101=m
+# CONFIG_GIGASET_DEBUG is not set
+CONFIG_HYSDN=m
+CONFIG_HYSDN_CAPI=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+CONFIG_MISDN_SPEEDFAX=m
+CONFIG_MISDN_INFINEON=m
+CONFIG_MISDN_W6692=m
+CONFIG_MISDN_NETJET=m
+CONFIG_MISDN_IPAC=m
+CONFIG_MISDN_ISAR=m
+CONFIG_ISDN_HDLC=m
+CONFIG_NVM=y
+# CONFIG_NVM_DEBUG is not set
+CONFIG_NVM_PBLK=m
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INPUT_MATRIXKMAP=m
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_ADP5520=m
+CONFIG_KEYBOARD_ADP5588=m
+CONFIG_KEYBOARD_ADP5589=m
+CONFIG_KEYBOARD_ATKBD=m
+CONFIG_KEYBOARD_QT1070=m
+CONFIG_KEYBOARD_QT2160=m
+CONFIG_KEYBOARD_DLINK_DIR685=m
+CONFIG_KEYBOARD_LKKBD=m
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+CONFIG_KEYBOARD_TCA6416=m
+CONFIG_KEYBOARD_TCA8418=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_LM8323=m
+CONFIG_KEYBOARD_LM8333=m
+CONFIG_KEYBOARD_MAX7359=m
+CONFIG_KEYBOARD_MCS=m
+CONFIG_KEYBOARD_MPR121=m
+CONFIG_KEYBOARD_NEWTON=m
+CONFIG_KEYBOARD_OPENCORES=m
+CONFIG_KEYBOARD_SAMSUNG=m
+CONFIG_KEYBOARD_STOWAWAY=m
+CONFIG_KEYBOARD_SUNKBD=m
+CONFIG_KEYBOARD_STMPE=m
+CONFIG_KEYBOARD_OMAP4=m
+CONFIG_KEYBOARD_TC3589X=m
+CONFIG_KEYBOARD_TM2_TOUCHKEY=m
+CONFIG_KEYBOARD_TWL4030=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_CAP11XX=m
+CONFIG_KEYBOARD_BCM=m
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_BYD=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_PS2_FOCALTECH=y
+CONFIG_MOUSE_PS2_VMMOUSE=y
+CONFIG_MOUSE_PS2_SMBUS=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+CONFIG_MOUSE_ELAN_I2C_SMBUS=y
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_GPIO=m
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_ANALOG=m
+CONFIG_JOYSTICK_A3D=m
+CONFIG_JOYSTICK_ADI=m
+CONFIG_JOYSTICK_COBRA=m
+CONFIG_JOYSTICK_GF2K=m
+CONFIG_JOYSTICK_GRIP=m
+CONFIG_JOYSTICK_GRIP_MP=m
+CONFIG_JOYSTICK_GUILLEMOT=m
+CONFIG_JOYSTICK_INTERACT=m
+CONFIG_JOYSTICK_SIDEWINDER=m
+CONFIG_JOYSTICK_TMDC=m
+CONFIG_JOYSTICK_IFORCE=m
+CONFIG_JOYSTICK_IFORCE_USB=y
+CONFIG_JOYSTICK_IFORCE_232=y
+CONFIG_JOYSTICK_WARRIOR=m
+CONFIG_JOYSTICK_MAGELLAN=m
+CONFIG_JOYSTICK_SPACEORB=m
+CONFIG_JOYSTICK_SPACEBALL=m
+CONFIG_JOYSTICK_STINGER=m
+CONFIG_JOYSTICK_TWIDJOY=m
+CONFIG_JOYSTICK_ZHENHUA=m
+CONFIG_JOYSTICK_DB9=m
+CONFIG_JOYSTICK_GAMECON=m
+CONFIG_JOYSTICK_TURBOGRAFX=m
+CONFIG_JOYSTICK_AS5011=m
+CONFIG_JOYSTICK_JOYDUMP=m
+CONFIG_JOYSTICK_XPAD=m
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_JOYSTICK_WALKERA0701=m
+CONFIG_JOYSTICK_PSXPAD_SPI=m
+CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_GTCO=m
+CONFIG_TABLET_USB_HANWANG=m
+CONFIG_TABLET_USB_KBTAB=m
+CONFIG_TABLET_USB_PEGASUS=m
+CONFIG_TABLET_SERIAL_WACOM4=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_PROPERTIES=y
+CONFIG_TOUCHSCREEN_88PM860X=m
+CONFIG_TOUCHSCREEN_ADS7846=m
+CONFIG_TOUCHSCREEN_AD7877=m
+CONFIG_TOUCHSCREEN_AD7879=m
+CONFIG_TOUCHSCREEN_AD7879_I2C=m
+CONFIG_TOUCHSCREEN_AD7879_SPI=m
+CONFIG_TOUCHSCREEN_AR1021_I2C=m
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
+CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
+CONFIG_TOUCHSCREEN_BU21013=m
+CONFIG_TOUCHSCREEN_CHIPONE_ICN8318=m
+CONFIG_TOUCHSCREEN_CY8CTMG110=m
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
+CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
+CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
+CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
+CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
+CONFIG_TOUCHSCREEN_DA9034=m
+CONFIG_TOUCHSCREEN_DA9052=m
+CONFIG_TOUCHSCREEN_DYNAPRO=m
+CONFIG_TOUCHSCREEN_HAMPSHIRE=m
+CONFIG_TOUCHSCREEN_EETI=m
+CONFIG_TOUCHSCREEN_EGALAX=m
+CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
+CONFIG_TOUCHSCREEN_EXC3000=m
+CONFIG_TOUCHSCREEN_FUJITSU=m
+CONFIG_TOUCHSCREEN_GOODIX=m
+CONFIG_TOUCHSCREEN_HIDEEP=m
+CONFIG_TOUCHSCREEN_ILI210X=m
+CONFIG_TOUCHSCREEN_S6SY761=m
+CONFIG_TOUCHSCREEN_GUNZE=m
+CONFIG_TOUCHSCREEN_EKTF2127=m
+CONFIG_TOUCHSCREEN_ELAN=m
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+CONFIG_TOUCHSCREEN_WACOM_I2C=m
+CONFIG_TOUCHSCREEN_MAX11801=m
+CONFIG_TOUCHSCREEN_MCS5000=m
+CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_IMX6UL_TSC=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+CONFIG_TOUCHSCREEN_MK712=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_EDT_FT5X06=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
+CONFIG_TOUCHSCREEN_UCB1400=m
+CONFIG_TOUCHSCREEN_PIXCIR=m
+CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
+CONFIG_TOUCHSCREEN_WM831X=m
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_TOUCHSCREEN_MC13783=m
+CONFIG_TOUCHSCREEN_USB_EGALAX=y
+CONFIG_TOUCHSCREEN_USB_PANJIT=y
+CONFIG_TOUCHSCREEN_USB_3M=y
+CONFIG_TOUCHSCREEN_USB_ITM=y
+CONFIG_TOUCHSCREEN_USB_ETURBO=y
+CONFIG_TOUCHSCREEN_USB_GUNZE=y
+CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
+CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
+CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
+CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
+CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_USB_JASTEC=y
+CONFIG_TOUCHSCREEN_USB_ELO=y
+CONFIG_TOUCHSCREEN_USB_E2I=y
+CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
+CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
+CONFIG_TOUCHSCREEN_USB_NEXIO=y
+CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+CONFIG_TOUCHSCREEN_TSC_SERIO=m
+CONFIG_TOUCHSCREEN_TSC200X_CORE=m
+CONFIG_TOUCHSCREEN_TSC2004=m
+CONFIG_TOUCHSCREEN_TSC2005=m
+CONFIG_TOUCHSCREEN_TSC2007=m
+CONFIG_TOUCHSCREEN_TSC2007_IIO=y
+CONFIG_TOUCHSCREEN_PCAP=m
+CONFIG_TOUCHSCREEN_RM_TS=m
+CONFIG_TOUCHSCREEN_SILEAD=m
+CONFIG_TOUCHSCREEN_SIS_I2C=m
+CONFIG_TOUCHSCREEN_ST1232=m
+CONFIG_TOUCHSCREEN_STMFTS=m
+CONFIG_TOUCHSCREEN_STMPE=m
+CONFIG_TOUCHSCREEN_SUR40=m
+CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
+CONFIG_TOUCHSCREEN_SX8654=m
+CONFIG_TOUCHSCREEN_TPS6507X=m
+CONFIG_TOUCHSCREEN_ZET6223=m
+CONFIG_TOUCHSCREEN_ZFORCE=m
+CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
+CONFIG_TOUCHSCREEN_ROHM_BU21023=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_88PM860X_ONKEY=m
+CONFIG_INPUT_88PM80X_ONKEY=m
+CONFIG_INPUT_AD714X=m
+CONFIG_INPUT_AD714X_I2C=m
+CONFIG_INPUT_AD714X_SPI=m
+CONFIG_INPUT_ARIZONA_HAPTICS=m
+CONFIG_INPUT_ATMEL_CAPTOUCH=m
+CONFIG_INPUT_BMA150=m
+CONFIG_INPUT_E3X0_BUTTON=m
+CONFIG_INPUT_PCSPKR=m
+CONFIG_INPUT_MAX77693_HAPTIC=m
+CONFIG_INPUT_MAX8925_ONKEY=m
+CONFIG_INPUT_MAX8997_HAPTIC=m
+CONFIG_INPUT_MC13783_PWRBUTTON=m
+CONFIG_INPUT_MMA8450=m
+CONFIG_INPUT_APANEL=m
+CONFIG_INPUT_GP2A=m
+CONFIG_INPUT_GPIO_BEEPER=m
+CONFIG_INPUT_GPIO_DECODER=m
+CONFIG_INPUT_CPCAP_PWRBUTTON=m
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_KXTJ9=m
+# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_REGULATOR_HAPTIC=m
+CONFIG_INPUT_RETU_PWRBUTTON=m
+CONFIG_INPUT_TPS65218_PWRBUTTON=m
+CONFIG_INPUT_AXP20X_PEK=m
+CONFIG_INPUT_TWL4030_PWRBUTTON=m
+CONFIG_INPUT_TWL4030_VIBRA=m
+CONFIG_INPUT_TWL6040_VIBRA=m
+CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_PALMAS_PWRBUTTON=m
+CONFIG_INPUT_PCF50633_PMU=m
+CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_PWM_BEEPER=m
+CONFIG_INPUT_PWM_VIBRA=m
+CONFIG_INPUT_RK805_PWRKEY=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_INPUT_DA9052_ONKEY=m
+CONFIG_INPUT_DA9055_ONKEY=m
+CONFIG_INPUT_DA9063_ONKEY=m
+CONFIG_INPUT_WM831X_ON=m
+CONFIG_INPUT_PCAP=m
+CONFIG_INPUT_ADXL34X=m
+CONFIG_INPUT_ADXL34X_I2C=m
+CONFIG_INPUT_ADXL34X_SPI=m
+CONFIG_INPUT_IMS_PCU=m
+CONFIG_INPUT_CMA3000=m
+CONFIG_INPUT_CMA3000_I2C=m
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
+CONFIG_INPUT_SOC_BUTTON_ARRAY=m
+CONFIG_INPUT_DRV260X_HAPTICS=m
+CONFIG_INPUT_DRV2665_HAPTICS=m
+CONFIG_INPUT_DRV2667_HAPTICS=m
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+CONFIG_RMI4_SPI=m
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+CONFIG_RMI4_F34=y
+CONFIG_RMI4_F54=y
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=m
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_CT82C710=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=m
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_PS2MULT=m
+CONFIG_SERIO_ARC_PS2=m
+# CONFIG_SERIO_APBPS2 is not set
+CONFIG_HYPERV_KEYBOARD=m
+CONFIG_SERIO_GPIO_PS2=m
+CONFIG_USERIO=m
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_GAMEPORT_EMU10K1=m
+CONFIG_GAMEPORT_FM801=m
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_ROCKETPORT=m
+CONFIG_CYCLADES=m
+CONFIG_CYZ_INTR=y
+CONFIG_MOXA_INTELLIO=m
+CONFIG_MOXA_SMARTIO=m
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_SYNCLINK_GT=m
+CONFIG_NOZOMI=m
+CONFIG_ISI=m
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_TRACE_ROUTER=m
+CONFIG_TRACE_SINK=m
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_FINTEK=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=m
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_MEN_MCB=m
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_ASPEED_VUART=m
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_FSL is not set
+CONFIG_SERIAL_8250_DW=m
+CONFIG_SERIAL_8250_RT288X=y
+CONFIG_SERIAL_8250_LPSS=y
+CONFIG_SERIAL_8250_MID=y
+CONFIG_SERIAL_8250_MOXA=m
+CONFIG_SERIAL_OF_PLATFORM=m
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_MAX3100=m
+CONFIG_SERIAL_MAX310X=m
+CONFIG_SERIAL_UARTLITE=m
+CONFIG_SERIAL_UARTLITE_NR_UARTS=1
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_SCCNXP=m
+CONFIG_SERIAL_SC16IS7XX_CORE=m
+CONFIG_SERIAL_SC16IS7XX=m
+CONFIG_SERIAL_SC16IS7XX_I2C=y
+CONFIG_SERIAL_SC16IS7XX_SPI=y
+CONFIG_SERIAL_ALTERA_JTAGUART=m
+CONFIG_SERIAL_ALTERA_UART=m
+CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
+CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
+CONFIG_SERIAL_IFX6X60=m
+CONFIG_SERIAL_XILINX_PS_UART=m
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+CONFIG_SERIAL_RP2=m
+CONFIG_SERIAL_RP2_NR_UARTS=32
+CONFIG_SERIAL_FSL_LPUART=m
+CONFIG_SERIAL_CONEXANT_DIGICOLOR=m
+CONFIG_SERIAL_MEN_Z135=m
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+# CONFIG_TTY_PRINTK is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+# CONFIG_IPMI_PROC_INTERFACE is not set
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_NVRAM=m
+CONFIG_R3964=m
+CONFIG_APPLICOM=m
+
+#
+# PCMCIA character devices
+#
+CONFIG_SYNCLINK_CS=m
+CONFIG_CARDMAN_4000=m
+CONFIG_CARDMAN_4040=m
+CONFIG_SCR24X=m
+CONFIG_IPWIRELESS=m
+CONFIG_MWAVE=m
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=256
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+CONFIG_HPET_MMAP_DEFAULT=y
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TCG_TPM=m
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=m
+CONFIG_TCG_TIS=m
+CONFIG_TCG_TIS_SPI=m
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+CONFIG_TCG_XEN=m
+CONFIG_TCG_CRB=m
+CONFIG_TCG_VTPM_PROXY=m
+CONFIG_TCG_TIS_ST33ZP24=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+CONFIG_TCG_TIS_ST33ZP24_SPI=m
+CONFIG_TELCLOCK=m
+# CONFIG_DEVPORT is not set
+CONFIG_XILLYBUS=m
+CONFIG_XILLYBUS_PCIE=m
+CONFIG_XILLYBUS_OF=m
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+CONFIG_I2C_ARB_GPIO_CHALLENGE=m
+CONFIG_I2C_MUX_GPIO=m
+CONFIG_I2C_MUX_GPMUX=m
+CONFIG_I2C_MUX_LTC4306=m
+CONFIG_I2C_MUX_PCA9541=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_MUX_PINCTRL=m
+CONFIG_I2C_MUX_REG=m
+CONFIG_I2C_DEMUX_PINCTRL=m
+CONFIG_I2C_MUX_MLXCPLD=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_CHT_WC=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_CBUS_GPIO=m
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PCI=m
+CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
+CONFIG_I2C_EMEV2=m
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
+CONFIG_I2C_KEMPLD=m
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_PCA_PLATFORM=m
+# CONFIG_I2C_PXA_PCI is not set
+CONFIG_I2C_RK3X=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_XILINX=m
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+CONFIG_I2C_DLN2=m
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_ROBOTFUZZ_OSIF=m
+CONFIG_I2C_TAOS_EVM=m
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_MLXCPLD=m
+CONFIG_I2C_CROS_EC_TUNNEL=m
+# CONFIG_I2C_STUB is not set
+CONFIG_I2C_SLAVE=y
+CONFIG_I2C_SLAVE_EEPROM=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ALTERA=m
+CONFIG_SPI_AXI_SPI_ENGINE=m
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_BUTTERFLY=m
+CONFIG_SPI_CADENCE=m
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_DW_PCI=m
+CONFIG_SPI_DW_MID_DMA=y
+CONFIG_SPI_DW_MMIO=m
+CONFIG_SPI_DLN2=m
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_LM70_LLP=m
+CONFIG_SPI_FSL_LIB=m
+CONFIG_SPI_FSL_SPI=m
+CONFIG_SPI_OC_TINY=m
+CONFIG_SPI_PXA2XX=m
+CONFIG_SPI_PXA2XX_PCI=m
+CONFIG_SPI_ROCKCHIP=m
+CONFIG_SPI_SC18IS602=m
+CONFIG_SPI_XCOMM=m
+CONFIG_SPI_XILINX=m
+CONFIG_SPI_ZYNQMP_GQSPI=m
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_LOOPBACK_TEST=m
+CONFIG_SPI_TLE62X0=m
+CONFIG_SPI_SLAVE=y
+CONFIG_SPI_SLAVE_TIME=m
+CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
+CONFIG_SPMI=m
+CONFIG_HSI=m
+CONFIG_HSI_BOARDINFO=y
+
+#
+# HSI controllers
+#
+
+#
+# HSI clients
+#
+CONFIG_HSI_CHAR=m
+CONFIG_PPS=m
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+CONFIG_PPS_CLIENT_KTIMER=m
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_DP83640_PHY=m
+CONFIG_PTP_1588_CLOCK_KVM=m
+CONFIG_PINCTRL=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_PINMUX=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+CONFIG_PINCTRL_AS3722=m
+CONFIG_PINCTRL_AXP209=m
+CONFIG_PINCTRL_AMD=m
+CONFIG_PINCTRL_MCP23S08=m
+CONFIG_PINCTRL_SINGLE=m
+CONFIG_PINCTRL_SX150X=y
+CONFIG_PINCTRL_MAX77620=m
+CONFIG_PINCTRL_PALMAS=m
+CONFIG_PINCTRL_RK805=m
+CONFIG_PINCTRL_BAYTRAIL=y
+CONFIG_PINCTRL_CHERRYVIEW=y
+CONFIG_PINCTRL_INTEL=y
+CONFIG_PINCTRL_BROXTON=y
+CONFIG_PINCTRL_CANNONLAKE=y
+CONFIG_PINCTRL_CEDARFORK=y
+CONFIG_PINCTRL_DENVERTON=y
+CONFIG_PINCTRL_GEMINILAKE=y
+CONFIG_PINCTRL_LEWISBURG=y
+CONFIG_PINCTRL_SUNRISEPOINT=y
+CONFIG_GPIOLIB=y
+CONFIG_OF_GPIO=y
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC=y
+CONFIG_GPIO_MAX730X=m
+
+#
+# Memory mapped GPIO drivers
+#
+CONFIG_GPIO_74XX_MMIO=m
+CONFIG_GPIO_ALTERA=m
+CONFIG_GPIO_AMDPT=m
+CONFIG_GPIO_DWAPB=m
+CONFIG_GPIO_EXAR=m
+CONFIG_GPIO_FTGPIO010=y
+CONFIG_GPIO_GENERIC_PLATFORM=m
+CONFIG_GPIO_GRGPIO=m
+CONFIG_GPIO_ICH=m
+CONFIG_GPIO_LYNXPOINT=m
+CONFIG_GPIO_MB86S7X=m
+CONFIG_GPIO_MENZ127=m
+CONFIG_GPIO_MOCKUP=m
+CONFIG_GPIO_SYSCON=m
+CONFIG_GPIO_VX855=m
+CONFIG_GPIO_XILINX=m
+
+#
+# Port-mapped I/O GPIO drivers
+#
+CONFIG_GPIO_F7188X=m
+CONFIG_GPIO_IT87=m
+CONFIG_GPIO_SCH=m
+CONFIG_GPIO_SCH311X=m
+
+#
+# I2C GPIO expanders
+#
+CONFIG_GPIO_ADP5588=m
+CONFIG_GPIO_ADNP=m
+CONFIG_GPIO_MAX7300=m
+CONFIG_GPIO_MAX732X=m
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_TPIC2810=m
+
+#
+# MFD GPIO expanders
+#
+CONFIG_GPIO_ADP5520=m
+CONFIG_GPIO_ARIZONA=m
+CONFIG_GPIO_BD9571MWV=m
+CONFIG_GPIO_CRYSTAL_COVE=m
+CONFIG_GPIO_DA9052=m
+CONFIG_GPIO_DA9055=m
+CONFIG_GPIO_DLN2=m
+CONFIG_GPIO_JANZ_TTL=m
+CONFIG_GPIO_KEMPLD=m
+CONFIG_GPIO_LP3943=m
+CONFIG_GPIO_LP873X=m
+CONFIG_GPIO_LP87565=m
+CONFIG_GPIO_MAX77620=m
+CONFIG_GPIO_PALMAS=y
+CONFIG_GPIO_RC5T583=y
+CONFIG_GPIO_STMPE=y
+CONFIG_GPIO_TC3589X=y
+CONFIG_GPIO_TPS65086=m
+CONFIG_GPIO_TPS65218=m
+CONFIG_GPIO_TPS6586X=y
+CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TPS65912=m
+CONFIG_GPIO_TPS68470=y
+CONFIG_GPIO_TWL4030=m
+CONFIG_GPIO_TWL6040=m
+CONFIG_GPIO_UCB1400=m
+CONFIG_GPIO_WHISKEY_COVE=m
+CONFIG_GPIO_WM831X=m
+CONFIG_GPIO_WM8350=m
+CONFIG_GPIO_WM8994=m
+
+#
+# PCI GPIO expanders
+#
+CONFIG_GPIO_AMD8111=m
+CONFIG_GPIO_ML_IOH=m
+CONFIG_GPIO_PCI_IDIO_16=m
+CONFIG_GPIO_PCIE_IDIO_24=m
+CONFIG_GPIO_RDC321X=m
+CONFIG_GPIO_SODAVILLE=y
+
+#
+# SPI GPIO expanders
+#
+CONFIG_GPIO_74X164=m
+CONFIG_GPIO_MAX3191X=m
+CONFIG_GPIO_MAX7301=m
+CONFIG_GPIO_MC33880=m
+CONFIG_GPIO_PISOSR=m
+CONFIG_GPIO_XRA1403=m
+
+#
+# USB GPIO expanders
+#
+CONFIG_GPIO_VIPERBOARD=m
+CONFIG_W1=m
+CONFIG_W1_CON=y
+
+#
+# 1-wire Bus Masters
+#
+CONFIG_W1_MASTER_MATROX=m
+CONFIG_W1_MASTER_DS2490=m
+CONFIG_W1_MASTER_DS2482=m
+CONFIG_W1_MASTER_DS1WM=m
+CONFIG_W1_MASTER_GPIO=m
+
+#
+# 1-wire Slaves
+#
+CONFIG_W1_SLAVE_THERM=m
+CONFIG_W1_SLAVE_SMEM=m
+CONFIG_W1_SLAVE_DS2405=m
+CONFIG_W1_SLAVE_DS2408=m
+# CONFIG_W1_SLAVE_DS2408_READBACK is not set
+CONFIG_W1_SLAVE_DS2413=m
+CONFIG_W1_SLAVE_DS2406=m
+CONFIG_W1_SLAVE_DS2423=m
+CONFIG_W1_SLAVE_DS2805=m
+CONFIG_W1_SLAVE_DS2431=m
+CONFIG_W1_SLAVE_DS2433=m
+# CONFIG_W1_SLAVE_DS2433_CRC is not set
+CONFIG_W1_SLAVE_DS2438=m
+CONFIG_W1_SLAVE_DS2760=m
+CONFIG_W1_SLAVE_DS2780=m
+CONFIG_W1_SLAVE_DS2781=m
+CONFIG_W1_SLAVE_DS28E04=m
+CONFIG_W1_SLAVE_DS28E17=m
+CONFIG_POWER_AVS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_LTC2952=y
+CONFIG_POWER_RESET_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_REBOOT_MODE=m
+CONFIG_SYSCON_REBOOT_MODE=m
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PDA_POWER=m
+CONFIG_GENERIC_ADC_BATTERY=m
+CONFIG_MAX8925_POWER=m
+CONFIG_WM831X_BACKUP=m
+CONFIG_WM831X_POWER=m
+CONFIG_WM8350_POWER=m
+CONFIG_TEST_POWER=m
+CONFIG_BATTERY_88PM860X=m
+CONFIG_BATTERY_ACT8945A=m
+CONFIG_BATTERY_CPCAP=m
+CONFIG_BATTERY_DS2760=m
+CONFIG_BATTERY_DS2780=m
+CONFIG_BATTERY_DS2781=m
+CONFIG_BATTERY_DS2782=m
+CONFIG_BATTERY_LEGO_EV3=m
+CONFIG_BATTERY_SBS=m
+CONFIG_CHARGER_SBS=m
+CONFIG_MANAGER_SBS=m
+CONFIG_BATTERY_BQ27XXX=m
+CONFIG_BATTERY_BQ27XXX_I2C=m
+CONFIG_BATTERY_BQ27XXX_HDQ=m
+# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
+CONFIG_BATTERY_DA9030=m
+CONFIG_BATTERY_DA9052=m
+CONFIG_CHARGER_DA9150=m
+CONFIG_BATTERY_DA9150=m
+CONFIG_CHARGER_AXP20X=m
+CONFIG_BATTERY_AXP20X=m
+CONFIG_AXP20X_POWER=m
+CONFIG_AXP288_CHARGER=m
+CONFIG_AXP288_FUEL_GAUGE=m
+CONFIG_BATTERY_MAX17040=m
+CONFIG_BATTERY_MAX17042=m
+CONFIG_BATTERY_MAX1721X=m
+CONFIG_BATTERY_TWL4030_MADC=m
+CONFIG_CHARGER_88PM860X=m
+CONFIG_CHARGER_PCF50633=m
+CONFIG_BATTERY_RX51=m
+CONFIG_CHARGER_ISP1704=m
+CONFIG_CHARGER_MAX8903=m
+CONFIG_CHARGER_TWL4030=m
+CONFIG_CHARGER_LP8727=m
+CONFIG_CHARGER_LP8788=m
+CONFIG_CHARGER_GPIO=m
+CONFIG_CHARGER_MANAGER=y
+CONFIG_CHARGER_LTC3651=m
+CONFIG_CHARGER_MAX14577=m
+CONFIG_CHARGER_DETECTOR_MAX14656=m
+CONFIG_CHARGER_MAX77693=m
+CONFIG_CHARGER_MAX8997=m
+CONFIG_CHARGER_MAX8998=m
+CONFIG_CHARGER_BQ2415X=m
+CONFIG_CHARGER_BQ24190=m
+CONFIG_CHARGER_BQ24257=m
+CONFIG_CHARGER_BQ24735=m
+CONFIG_CHARGER_BQ25890=m
+CONFIG_CHARGER_SMB347=m
+CONFIG_CHARGER_TPS65090=m
+CONFIG_CHARGER_TPS65217=m
+CONFIG_BATTERY_GAUGE_LTC2941=m
+CONFIG_BATTERY_RT5033=m
+CONFIG_CHARGER_RT9455=m
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_ABITUGURU=m
+CONFIG_SENSORS_ABITUGURU3=m
+CONFIG_SENSORS_AD7314=m
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+CONFIG_SENSORS_ADT7310=m
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+CONFIG_SENSORS_ASC7621=m
+CONFIG_SENSORS_K8TEMP=m
+CONFIG_SENSORS_K10TEMP=m
+CONFIG_SENSORS_FAM15H_POWER=m
+CONFIG_SENSORS_APPLESMC=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_ASPEED=m
+CONFIG_SENSORS_ATXP1=m
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_DELL_SMM=m
+CONFIG_SENSORS_DA9052_ADC=m
+CONFIG_SENSORS_DA9055=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_MC13783_ADC=m
+CONFIG_SENSORS_FSCHMD=m
+CONFIG_SENSORS_FTSTEUTATES=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+CONFIG_SENSORS_G762=m
+CONFIG_SENSORS_GPIO_FAN=m
+CONFIG_SENSORS_HIH6130=m
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+CONFIG_SENSORS_IIO_HWMON=m
+CONFIG_SENSORS_I5500=m
+CONFIG_SENSORS_CORETEMP=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_POWR1220=m
+CONFIG_SENSORS_LINEAGE=m
+CONFIG_SENSORS_LTC2945=m
+CONFIG_SENSORS_LTC2990=m
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+CONFIG_SENSORS_LTC4222=m
+CONFIG_SENSORS_LTC4245=m
+CONFIG_SENSORS_LTC4260=m
+CONFIG_SENSORS_LTC4261=m
+CONFIG_SENSORS_MAX1111=m
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+CONFIG_SENSORS_MAX31722=m
+CONFIG_SENSORS_MAX6621=m
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6642=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+CONFIG_SENSORS_MAX31790=m
+CONFIG_SENSORS_MCP3021=m
+CONFIG_SENSORS_TC654=m
+CONFIG_SENSORS_MENF21BMC_HWMON=m
+CONFIG_SENSORS_ADCXX=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM70=m
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+CONFIG_SENSORS_NCT6683=m
+CONFIG_SENSORS_NCT6775=m
+CONFIG_SENSORS_NCT7802=m
+CONFIG_SENSORS_NCT7904=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+CONFIG_SENSORS_ADM1275=m
+CONFIG_SENSORS_IBM_CFFPS=m
+CONFIG_SENSORS_IR35221=m
+CONFIG_SENSORS_LM25066=m
+CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC2978_REGULATOR is not set
+CONFIG_SENSORS_LTC3815=m
+CONFIG_SENSORS_MAX16064=m
+CONFIG_SENSORS_MAX20751=m
+CONFIG_SENSORS_MAX31785=m
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+CONFIG_SENSORS_TPS40422=m
+CONFIG_SENSORS_TPS53679=m
+CONFIG_SENSORS_UCD9000=m
+CONFIG_SENSORS_UCD9200=m
+CONFIG_SENSORS_ZL6100=m
+CONFIG_SENSORS_PWM_FAN=m
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
+CONFIG_SENSORS_SHT3x=m
+CONFIG_SENSORS_SHTC1=m
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+CONFIG_SENSORS_EMC2103=m
+CONFIG_SENSORS_EMC6W201=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH56XX_COMMON=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_SCH5636=m
+CONFIG_SENSORS_STTS751=m
+CONFIG_SENSORS_SMM665=m
+CONFIG_SENSORS_ADC128D818=m
+CONFIG_SENSORS_ADS1015=m
+CONFIG_SENSORS_ADS7828=m
+CONFIG_SENSORS_ADS7871=m
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_SENSORS_INA3221=m
+CONFIG_SENSORS_TC74=m
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+CONFIG_SENSORS_TMP103=m
+CONFIG_SENSORS_TMP108=m
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+CONFIG_SENSORS_VIA_CPUTEMP=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83773G=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83795=m
+# CONFIG_SENSORS_W83795_FANCTRL is not set
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_SENSORS_WM831X=m
+CONFIG_SENSORS_WM8350=m
+CONFIG_SENSORS_XGENE=m
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_SENSORS_ATK0110=m
+CONFIG_THERMAL=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CLOCK_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+# CONFIG_THERMAL_EMULATION is not set
+CONFIG_MAX77620_THERMAL=m
+CONFIG_QORIQ_THERMAL=m
+CONFIG_DA9062_THERMAL=m
+CONFIG_INTEL_POWERCLAMP=m
+CONFIG_X86_PKG_TEMP_THERMAL=m
+CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
+CONFIG_INTEL_SOC_DTS_THERMAL=m
+
+#
+# ACPI INT340X thermal drivers
+#
+CONFIG_INT340X_THERMAL=m
+CONFIG_ACPI_THERMAL_REL=m
+CONFIG_INT3406_THERMAL=m
+CONFIG_INTEL_BXT_PMIC_THERMAL=m
+CONFIG_INTEL_PCH_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_GENERIC_ADC_THERMAL=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_SYSFS=y
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set
+CONFIG_DA9052_WATCHDOG=m
+CONFIG_DA9055_WATCHDOG=m
+CONFIG_DA9063_WATCHDOG=m
+CONFIG_DA9062_WATCHDOG=m
+CONFIG_GPIO_WATCHDOG=m
+CONFIG_MENF21BMC_WATCHDOG=m
+CONFIG_WDAT_WDT=m
+CONFIG_WM831X_WATCHDOG=m
+CONFIG_WM8350_WATCHDOG=m
+CONFIG_XILINX_WATCHDOG=m
+CONFIG_ZIIRAVE_WATCHDOG=m
+CONFIG_RAVE_SP_WATCHDOG=m
+CONFIG_CADENCE_WATCHDOG=m
+CONFIG_DW_WATCHDOG=m
+CONFIG_RN5T618_WATCHDOG=m
+CONFIG_TWL4030_WATCHDOG=m
+CONFIG_MAX63XX_WATCHDOG=m
+CONFIG_MAX77620_WATCHDOG=m
+CONFIG_RETU_WATCHDOG=m
+CONFIG_ACQUIRE_WDT=m
+CONFIG_ADVANTECH_WDT=m
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+CONFIG_F71808E_WDT=m
+CONFIG_SP5100_TCO=m
+CONFIG_SBC_FITPC2_WATCHDOG=m
+CONFIG_EUROTECH_WDT=m
+CONFIG_IB700_WDT=m
+CONFIG_IBMASR=m
+CONFIG_WAFER_WDT=m
+CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
+CONFIG_ITCO_WDT=m
+CONFIG_ITCO_VENDOR_SUPPORT=y
+CONFIG_IT8712F_WDT=m
+CONFIG_IT87_WDT=m
+CONFIG_HP_WATCHDOG=m
+CONFIG_KEMPLD_WDT=m
+CONFIG_HPWDT_NMI_DECODING=y
+CONFIG_SC1200_WDT=m
+CONFIG_PC87413_WDT=m
+CONFIG_NV_TCO=m
+CONFIG_60XX_WDT=m
+CONFIG_CPU5_WDT=m
+CONFIG_SMSC_SCH311X_WDT=m
+CONFIG_SMSC37B787_WDT=m
+CONFIG_VIA_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_W83977F_WDT=m
+CONFIG_MACHZ_WDT=m
+CONFIG_SBC_EPX_C3_WATCHDOG=m
+# CONFIG_INTEL_MEI_WDT is not set
+CONFIG_NI903X_WDT=m
+CONFIG_NIC7018_WDT=m
+CONFIG_MEN_A21_WDT=m
+CONFIG_XEN_WDT=m
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+
+#
+# Watchdog Pretimeout Governors
+#
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set
+CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
+CONFIG_SSB_POSSIBLE=y
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+CONFIG_SSB_PCMCIAHOST=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_SSB_DRIVER_GPIO=y
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_BLOCKIO=y
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+CONFIG_MFD_ACT8945A=m
+CONFIG_MFD_AS3711=y
+CONFIG_MFD_AS3722=m
+CONFIG_PMIC_ADP5520=y
+CONFIG_MFD_AAT2870_CORE=y
+CONFIG_MFD_ATMEL_FLEXCOM=m
+CONFIG_MFD_ATMEL_HLCDC=m
+CONFIG_MFD_BCM590XX=m
+CONFIG_MFD_BD9571MWV=m
+CONFIG_MFD_AXP20X=m
+CONFIG_MFD_AXP20X_I2C=m
+CONFIG_MFD_CROS_EC=m
+CONFIG_MFD_CROS_EC_I2C=m
+CONFIG_MFD_CROS_EC_SPI=m
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_PMIC_DA903X=y
+CONFIG_PMIC_DA9052=y
+CONFIG_MFD_DA9052_SPI=y
+CONFIG_MFD_DA9052_I2C=y
+CONFIG_MFD_DA9055=y
+CONFIG_MFD_DA9062=m
+CONFIG_MFD_DA9063=m
+CONFIG_MFD_DA9150=m
+CONFIG_MFD_DLN2=m
+CONFIG_MFD_MC13XXX=m
+CONFIG_MFD_MC13XXX_SPI=m
+CONFIG_MFD_MC13XXX_I2C=m
+CONFIG_MFD_HI6421_PMIC=m
+CONFIG_HTC_PASIC3=m
+CONFIG_HTC_I2CPLD=y
+CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
+CONFIG_LPC_ICH=m
+CONFIG_LPC_SCH=m
+CONFIG_INTEL_SOC_PMIC=y
+CONFIG_INTEL_SOC_PMIC_BXTWC=m
+CONFIG_INTEL_SOC_PMIC_CHTWC=y
+CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m
+CONFIG_MFD_INTEL_LPSS=m
+CONFIG_MFD_INTEL_LPSS_ACPI=m
+CONFIG_MFD_INTEL_LPSS_PCI=m
+CONFIG_MFD_JANZ_CMODIO=m
+CONFIG_MFD_KEMPLD=m
+CONFIG_MFD_88PM800=m
+CONFIG_MFD_88PM805=m
+CONFIG_MFD_88PM860X=y
+CONFIG_MFD_MAX14577=m
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_MAX77686=m
+CONFIG_MFD_MAX77693=m
+CONFIG_MFD_MAX77843=y
+CONFIG_MFD_MAX8907=m
+CONFIG_MFD_MAX8925=y
+CONFIG_MFD_MAX8997=y
+CONFIG_MFD_MAX8998=y
+CONFIG_MFD_MT6397=m
+CONFIG_MFD_MENF21BMC=m
+CONFIG_EZX_PCAP=y
+CONFIG_MFD_CPCAP=m
+CONFIG_MFD_VIPERBOARD=m
+CONFIG_MFD_RETU=m
+CONFIG_MFD_PCF50633=m
+CONFIG_PCF50633_ADC=m
+CONFIG_PCF50633_GPIO=m
+CONFIG_UCB1400_CORE=m
+CONFIG_MFD_RDC321X=m
+CONFIG_MFD_RT5033=m
+CONFIG_MFD_RC5T583=y
+CONFIG_MFD_RK808=m
+CONFIG_MFD_RN5T618=m
+CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SI476X_CORE=m
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+CONFIG_MFD_SKY81452=m
+CONFIG_MFD_SMSC=y
+CONFIG_ABX500_CORE=y
+CONFIG_AB3100_CORE=y
+CONFIG_AB3100_OTP=y
+CONFIG_MFD_STMPE=y
+
+#
+# STMicroelectronics STMPE Interface Drivers
+#
+CONFIG_STMPE_I2C=y
+CONFIG_STMPE_SPI=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MFD_TI_AM335X_TSCADC=m
+CONFIG_MFD_LP3943=m
+CONFIG_MFD_LP8788=y
+CONFIG_MFD_TI_LMU=m
+CONFIG_MFD_PALMAS=y
+CONFIG_TPS6105X=m
+CONFIG_TPS65010=m
+CONFIG_TPS6507X=m
+CONFIG_MFD_TPS65086=m
+CONFIG_MFD_TPS65090=y
+CONFIG_MFD_TPS65217=m
+CONFIG_MFD_TPS68470=y
+CONFIG_MFD_TI_LP873X=m
+CONFIG_MFD_TI_LP87565=m
+CONFIG_MFD_TPS65218=m
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_TPS65910=y
+CONFIG_MFD_TPS65912=m
+CONFIG_MFD_TPS65912_I2C=m
+CONFIG_MFD_TPS65912_SPI=m
+CONFIG_MFD_TPS80031=y
+CONFIG_TWL4030_CORE=y
+CONFIG_MFD_TWL4030_AUDIO=y
+CONFIG_TWL6040_CORE=y
+CONFIG_MFD_WL1273_CORE=m
+CONFIG_MFD_LM3533=m
+CONFIG_MFD_TC3589X=y
+# CONFIG_MFD_TMIO is not set
+CONFIG_MFD_VX855=m
+CONFIG_MFD_ARIZONA=y
+CONFIG_MFD_ARIZONA_I2C=m
+CONFIG_MFD_ARIZONA_SPI=m
+CONFIG_MFD_CS47L24=y
+CONFIG_MFD_WM5102=y
+CONFIG_MFD_WM5110=y
+CONFIG_MFD_WM8997=y
+CONFIG_MFD_WM8998=y
+CONFIG_MFD_WM8400=y
+CONFIG_MFD_WM831X=y
+CONFIG_MFD_WM831X_I2C=y
+CONFIG_MFD_WM831X_SPI=y
+CONFIG_MFD_WM8350=y
+CONFIG_MFD_WM8350_I2C=y
+CONFIG_MFD_WM8994=m
+CONFIG_RAVE_SP_CORE=m
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_REGULATOR_88PM800=m
+CONFIG_REGULATOR_88PM8607=m
+CONFIG_REGULATOR_ACT8865=m
+CONFIG_REGULATOR_ACT8945A=m
+CONFIG_REGULATOR_AD5398=m
+CONFIG_REGULATOR_ANATOP=m
+CONFIG_REGULATOR_AAT2870=m
+CONFIG_REGULATOR_AB3100=m
+CONFIG_REGULATOR_ARIZONA_LDO1=m
+CONFIG_REGULATOR_ARIZONA_MICSUPP=m
+CONFIG_REGULATOR_AS3711=m
+CONFIG_REGULATOR_AS3722=m
+CONFIG_REGULATOR_AXP20X=m
+CONFIG_REGULATOR_BCM590XX=m
+CONFIG_REGULATOR_BD9571MWV=m
+CONFIG_REGULATOR_CPCAP=m
+CONFIG_REGULATOR_DA903X=m
+CONFIG_REGULATOR_DA9052=m
+CONFIG_REGULATOR_DA9055=m
+CONFIG_REGULATOR_DA9062=m
+CONFIG_REGULATOR_DA9063=m
+CONFIG_REGULATOR_DA9210=m
+CONFIG_REGULATOR_DA9211=m
+CONFIG_REGULATOR_FAN53555=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_REGULATOR_HI6421=m
+CONFIG_REGULATOR_HI6421V530=m
+CONFIG_REGULATOR_ISL9305=m
+CONFIG_REGULATOR_ISL6271A=m
+CONFIG_REGULATOR_LM363X=m
+CONFIG_REGULATOR_LP3971=m
+CONFIG_REGULATOR_LP3972=m
+CONFIG_REGULATOR_LP872X=m
+CONFIG_REGULATOR_LP873X=m
+CONFIG_REGULATOR_LP8755=m
+CONFIG_REGULATOR_LP87565=m
+CONFIG_REGULATOR_LP8788=m
+CONFIG_REGULATOR_LTC3589=m
+CONFIG_REGULATOR_LTC3676=m
+CONFIG_REGULATOR_MAX14577=m
+CONFIG_REGULATOR_MAX1586=m
+CONFIG_REGULATOR_MAX77620=m
+CONFIG_REGULATOR_MAX8649=m
+CONFIG_REGULATOR_MAX8660=m
+CONFIG_REGULATOR_MAX8907=m
+CONFIG_REGULATOR_MAX8925=m
+CONFIG_REGULATOR_MAX8952=m
+CONFIG_REGULATOR_MAX8973=m
+CONFIG_REGULATOR_MAX8997=m
+CONFIG_REGULATOR_MAX8998=m
+CONFIG_REGULATOR_MAX77686=m
+CONFIG_REGULATOR_MAX77693=m
+CONFIG_REGULATOR_MAX77802=m
+CONFIG_REGULATOR_MC13XXX_CORE=m
+CONFIG_REGULATOR_MC13783=m
+CONFIG_REGULATOR_MC13892=m
+CONFIG_REGULATOR_MT6311=m
+CONFIG_REGULATOR_MT6323=m
+CONFIG_REGULATOR_MT6397=m
+CONFIG_REGULATOR_PALMAS=m
+CONFIG_REGULATOR_PCAP=m
+CONFIG_REGULATOR_PCF50633=m
+CONFIG_REGULATOR_PFUZE100=m
+CONFIG_REGULATOR_PV88060=m
+CONFIG_REGULATOR_PV88080=m
+CONFIG_REGULATOR_PV88090=m
+CONFIG_REGULATOR_PWM=m
+CONFIG_REGULATOR_QCOM_SPMI=m
+CONFIG_REGULATOR_RC5T583=m
+CONFIG_REGULATOR_RK808=m
+CONFIG_REGULATOR_RN5T618=m
+CONFIG_REGULATOR_RT5033=m
+CONFIG_REGULATOR_S2MPA01=m
+CONFIG_REGULATOR_S2MPS11=m
+CONFIG_REGULATOR_S5M8767=m
+CONFIG_REGULATOR_SKY81452=m
+CONFIG_REGULATOR_TPS51632=m
+CONFIG_REGULATOR_TPS6105X=m
+CONFIG_REGULATOR_TPS62360=m
+CONFIG_REGULATOR_TPS65023=m
+CONFIG_REGULATOR_TPS6507X=m
+CONFIG_REGULATOR_TPS65086=m
+CONFIG_REGULATOR_TPS65090=m
+CONFIG_REGULATOR_TPS65132=m
+CONFIG_REGULATOR_TPS65217=m
+CONFIG_REGULATOR_TPS65218=m
+CONFIG_REGULATOR_TPS6524X=m
+CONFIG_REGULATOR_TPS6586X=m
+CONFIG_REGULATOR_TPS65910=m
+CONFIG_REGULATOR_TPS65912=m
+CONFIG_REGULATOR_TPS80031=m
+CONFIG_REGULATOR_TWL4030=m
+CONFIG_REGULATOR_VCTRL=m
+CONFIG_REGULATOR_WM831X=m
+CONFIG_REGULATOR_WM8350=m
+CONFIG_REGULATOR_WM8400=m
+CONFIG_REGULATOR_WM8994=m
+CONFIG_CEC_CORE=m
+CONFIG_CEC_NOTIFIER=y
+CONFIG_CEC_PIN=y
+CONFIG_RC_CORE=m
+CONFIG_RC_MAP=m
+CONFIG_LIRC=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_ATI_REMOTE=m
+CONFIG_IR_ENE=m
+CONFIG_IR_HIX5HD2=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_FINTEK=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SPI=m
+CONFIG_IR_STREAMZAP=m
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_IR_IGORPLUGUSB=m
+CONFIG_IR_IGUANA=m
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_LOOPBACK=m
+CONFIG_IR_GPIO_CIR=m
+CONFIG_IR_GPIO_TX=m
+CONFIG_IR_PWM_TX=m
+CONFIG_IR_SERIAL=m
+CONFIG_IR_SERIAL_TRANSMITTER=y
+CONFIG_IR_SIR=m
+CONFIG_MEDIA_SUPPORT=m
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_CEC_SUPPORT=y
+CONFIG_MEDIA_CEC_RC=y
+CONFIG_MEDIA_CONTROLLER=y
+# CONFIG_MEDIA_CONTROLLER_DVB is not set
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=m
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_PCI_SKELETON is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_V4L2_FLASH_LED_CLASS=m
+CONFIG_V4L2_FWNODE=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_DMA_SG=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_DVB_CORE=m
+CONFIG_DVB_MMAP=y
+CONFIG_DVB_NET=y
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_MAX_ADAPTERS=16
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_DTCS033=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STK1135=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TOUPTEK=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_VIDEO_CPIA2=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+CONFIG_VIDEO_USBTV=m
+
+#
+# Analog TV USB devices
+#
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_USBVISION=m
+CONFIG_VIDEO_STK1160_COMMON=m
+CONFIG_VIDEO_STK1160=m
+CONFIG_VIDEO_GO7007=m
+CONFIG_VIDEO_GO7007_USB=m
+CONFIG_VIDEO_GO7007_LOADER=m
+CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
+
+#
+# Analog/digital TV USB devices
+#
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+CONFIG_VIDEO_AU0828_RC=y
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_TM6000=m
+CONFIG_VIDEO_TM6000_ALSA=m
+CONFIG_VIDEO_TM6000_DVB=m
+
+#
+# Digital TV USB devices
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_DIB3000MC=m
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_FRIIO=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9015=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_MXL111SF=m
+CONFIG_DVB_USB_RTL28XXU=m
+CONFIG_DVB_USB_DVBSKY=m
+CONFIG_DVB_USB_ZD1301=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
+CONFIG_DVB_AS102=m
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_V4L2=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=m
+
+#
+# Software defined radio USB devices
+#
+CONFIG_USB_AIRSPY=m
+CONFIG_USB_HACKRF=m
+CONFIG_USB_MSI2500=m
+
+#
+# USB HDMI CEC adapters
+#
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+CONFIG_VIDEO_MEYE=m
+CONFIG_VIDEO_SOLO6X10=m
+CONFIG_VIDEO_TW5864=m
+CONFIG_VIDEO_TW68=m
+CONFIG_VIDEO_TW686X=m
+CONFIG_VIDEO_ZORAN=m
+CONFIG_VIDEO_ZORAN_DC30=m
+CONFIG_VIDEO_ZORAN_ZR36060=m
+CONFIG_VIDEO_ZORAN_BUZ=m
+CONFIG_VIDEO_ZORAN_DC10=m
+CONFIG_VIDEO_ZORAN_LML33=m
+CONFIG_VIDEO_ZORAN_LML33R10=m
+CONFIG_VIDEO_ZORAN_AVS6EYES=m
+
+#
+# Media capture/analog TV support
+#
+CONFIG_VIDEO_IVTV=m
+# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
+CONFIG_VIDEO_IVTV_ALSA=m
+CONFIG_VIDEO_FB_IVTV=m
+CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_HEXIUM_ORION=m
+CONFIG_VIDEO_MXB=m
+CONFIG_VIDEO_DT3155=m
+
+#
+# Media capture/analog/hybrid TV support
+#
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX18_ALSA=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+CONFIG_VIDEO_CX25821=m
+CONFIG_VIDEO_CX25821_ALSA=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+CONFIG_VIDEO_CX88_ENABLE_VP3054=y
+CONFIG_VIDEO_CX88_VP3054=m
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7134_GO7007=m
+CONFIG_VIDEO_SAA7164=m
+CONFIG_VIDEO_COBALT=m
+
+#
+# Media digital TV PCI Adapters
+#
+CONFIG_DVB_AV7110_IR=y
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_DM1105=m
+CONFIG_DVB_PT1=m
+CONFIG_DVB_PT3=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+CONFIG_DVB_NGENE=m
+CONFIG_DVB_DDBRIDGE=m
+# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
+CONFIG_DVB_SMIPCIE=m
+CONFIG_DVB_NETUP_UNIDVB=m
+CONFIG_VIDEO_IPU3_CIO2=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_CAFE_CCIC=m
+CONFIG_VIDEO_MUX=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_XILINX=m
+CONFIG_VIDEO_XILINX_TPG=m
+CONFIG_VIDEO_XILINX_VTC=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
+CONFIG_VIDEO_SH_VEU=m
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIMC=m
+CONFIG_VIDEO_VIVID=m
+CONFIG_VIDEO_VIVID_CEC=y
+CONFIG_VIDEO_VIVID_MAX_DEVS=64
+CONFIG_VIDEO_VIM2M=m
+CONFIG_DVB_PLATFORM_DRIVERS=y
+CONFIG_CEC_PLATFORM_DRIVERS=y
+CONFIG_CEC_GPIO=m
+CONFIG_SDR_PLATFORM_DRIVERS=y
+
+#
+# Supported MMC/SDIO adapters
+#
+CONFIG_SMS_SDIO_DRV=m
+CONFIG_RADIO_ADAPTERS=y
+CONFIG_RADIO_TEA575X=m
+CONFIG_RADIO_SI470X=y
+CONFIG_USB_SI470X=m
+CONFIG_I2C_SI470X=m
+CONFIG_RADIO_SI4713=m
+CONFIG_USB_SI4713=m
+CONFIG_PLATFORM_SI4713=m
+CONFIG_I2C_SI4713=m
+CONFIG_RADIO_SI476X=m
+CONFIG_USB_MR800=m
+CONFIG_USB_DSBR=m
+CONFIG_RADIO_MAXIRADIO=m
+CONFIG_RADIO_SHARK=m
+CONFIG_RADIO_SHARK2=m
+CONFIG_USB_KEENE=m
+CONFIG_USB_RAREMONO=m
+CONFIG_USB_MA901=m
+CONFIG_RADIO_TEA5764=m
+CONFIG_RADIO_SAA7706H=m
+CONFIG_RADIO_TEF6862=m
+CONFIG_RADIO_WL1273=m
+
+#
+# Texas Instruments WL128x FM driver (ST based)
+#
+CONFIG_RADIO_WL128X=m
+
+#
+# Supported FireWire (IEEE 1394) Adapters
+#
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_INPUT=y
+CONFIG_MEDIA_COMMON_OPTIONS=y
+
+#
+# common driver options
+#
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_CYPRESS_FIRMWARE=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_V4L2=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEOBUF2_DMA_SG=m
+CONFIG_VIDEOBUF2_DVB=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
+CONFIG_VIDEO_V4L2_TPG=m
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, spi, frontends)
+#
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+CONFIG_MEDIA_ATTACH=y
+CONFIG_VIDEO_IR_I2C=m
+
+#
+# Audio decoders, processors and mixers
+#
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_VIDEO_TDA7432=m
+CONFIG_VIDEO_TDA9840=m
+CONFIG_VIDEO_TEA6415C=m
+CONFIG_VIDEO_TEA6420=m
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS3308=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_UDA1342=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_WM8739=m
+CONFIG_VIDEO_VP27SMPX=m
+CONFIG_VIDEO_SONY_BTF_MPX=m
+
+#
+# RDS decoders
+#
+CONFIG_VIDEO_SAA6588=m
+
+#
+# Video decoders
+#
+CONFIG_VIDEO_ADV7604=m
+CONFIG_VIDEO_ADV7842=m
+CONFIG_VIDEO_BT819=m
+CONFIG_VIDEO_BT856=m
+CONFIG_VIDEO_BT866=m
+CONFIG_VIDEO_KS0127=m
+CONFIG_VIDEO_SAA7110=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_TVP5150=m
+CONFIG_VIDEO_TW2804=m
+CONFIG_VIDEO_TW9903=m
+CONFIG_VIDEO_TW9906=m
+CONFIG_VIDEO_VPX3220=m
+
+#
+# Video and audio decoders
+#
+CONFIG_VIDEO_SAA717X=m
+CONFIG_VIDEO_CX25840=m
+
+#
+# Video encoders
+#
+CONFIG_VIDEO_SAA7127=m
+CONFIG_VIDEO_SAA7185=m
+CONFIG_VIDEO_ADV7170=m
+CONFIG_VIDEO_ADV7175=m
+CONFIG_VIDEO_ADV7511=m
+
+#
+# Camera sensor devices
+#
+CONFIG_VIDEO_OV2640=m
+CONFIG_VIDEO_OV7640=m
+CONFIG_VIDEO_OV7670=m
+CONFIG_VIDEO_MT9M111=m
+CONFIG_VIDEO_MT9V011=m
+
+#
+# Flash devices
+#
+
+#
+# Video improvement chips
+#
+CONFIG_VIDEO_UPD64031A=m
+CONFIG_VIDEO_UPD64083=m
+
+#
+# Audio/Video compression chips
+#
+CONFIG_VIDEO_SAA6752HS=m
+
+#
+# SDR tuner chips
+#
+
+#
+# Miscellaneous helper chips
+#
+CONFIG_VIDEO_M52790=m
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# soc_camera sensor drivers
+#
+CONFIG_SOC_CAMERA_IMX074=m
+CONFIG_SOC_CAMERA_MT9M001=m
+CONFIG_SOC_CAMERA_MT9M111=m
+CONFIG_SOC_CAMERA_MT9T031=m
+CONFIG_SOC_CAMERA_MT9T112=m
+CONFIG_SOC_CAMERA_MT9V022=m
+CONFIG_SOC_CAMERA_OV5642=m
+CONFIG_SOC_CAMERA_OV772X=m
+CONFIG_SOC_CAMERA_OV9640=m
+CONFIG_SOC_CAMERA_OV9740=m
+CONFIG_SOC_CAMERA_RJ54N1=m
+CONFIG_SOC_CAMERA_TW9910=m
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA18250=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MSI001=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_FC0011=m
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_E4000=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_M88RS6000T=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_SI2157=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_R820T=m
+CONFIG_MEDIA_TUNER_MXL301RF=m
+CONFIG_MEDIA_TUNER_QM1D1C0042=m
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV0910=m
+CONFIG_DVB_STV6110x=m
+CONFIG_DVB_STV6111=m
+CONFIG_DVB_MXL5XX=m
+CONFIG_DVB_M88DS3103=m
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_TDA18271C2DD=m
+CONFIG_DVB_SI2165=m
+CONFIG_DVB_MN88472=m
+CONFIG_DVB_MN88473=m
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_CX24117=m
+CONFIG_DVB_CX24120=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_TDA10071=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_AF9013=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_CXD2841ER=m
+CONFIG_DVB_RTL2830=m
+CONFIG_DVB_RTL2832=m
+CONFIG_DVB_RTL2832_SDR=m
+CONFIG_DVB_SI2168=m
+CONFIG_DVB_AS102_FE=m
+CONFIG_DVB_ZD1301_DEMOD=m
+CONFIG_DVB_GP8PSK_FE=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_LGDT3306A=m
+CONFIG_DVB_LG2160=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_S5H1411=m
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_S921=m
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_TC90522=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_DRX39XYJ=m
+CONFIG_DVB_LNBH25=m
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_LNBP22=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_A8293=m
+CONFIG_DVB_SP2=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_M88RS2000=m
+CONFIG_DVB_AF9033=m
+CONFIG_DVB_HORUS3A=m
+CONFIG_DVB_ASCOT2E=m
+CONFIG_DVB_HELENE=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=m
+CONFIG_AGP_AMD64=m
+CONFIG_AGP_INTEL=m
+CONFIG_AGP_SIS=m
+CONFIG_AGP_VIA=m
+CONFIG_INTEL_GTT=m
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=10
+CONFIG_VGA_SWITCHEROO=y
+CONFIG_DRM=m
+CONFIG_DRM_MIPI_DSI=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+# CONFIG_DRM_DEBUG_MM_SELFTEST is not set
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_KMS_FB_HELPER=y
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_TTM=m
+CONFIG_DRM_GEM_CMA_HELPER=y
+CONFIG_DRM_KMS_CMA_HELPER=y
+CONFIG_DRM_VM=y
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+CONFIG_DRM_I2C_NXP_TDA998X=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+CONFIG_DRM_AMD_ACP=y
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_PRE_VEGA=y
+# CONFIG_DRM_AMD_DC_FBC is not set
+CONFIG_DRM_AMD_DC_DCN1_0=y
+# CONFIG_DEBUG_KERNEL_DC is not set
+
+#
+# AMD Library routines
+#
+CONFIG_CHASH=m
+# CONFIG_CHASH_STATS is not set
+# CONFIG_CHASH_SELFTEST is not set
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+CONFIG_DRM_I915=m
+CONFIG_DRM_I915_ALPHA_SUPPORT=y
+CONFIG_DRM_I915_CAPTURE_ERROR=y
+CONFIG_DRM_I915_COMPRESS_ERROR=y
+CONFIG_DRM_I915_USERPTR=y
+CONFIG_DRM_I915_GVT=y
+CONFIG_DRM_I915_GVT_KVMGT=m
+
+#
+# drm/i915 Debugging
+#
+# CONFIG_DRM_I915_WERROR is not set
+# CONFIG_DRM_I915_DEBUG is not set
+# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
+# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
+# CONFIG_DRM_I915_SELFTEST is not set
+# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
+# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
+CONFIG_DRM_VGEM=m
+CONFIG_DRM_VMWGFX=m
+CONFIG_DRM_VMWGFX_FBCON=y
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_GMA600=y
+CONFIG_DRM_GMA3600=y
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_DRM_RCAR_DW_HDMI=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+CONFIG_DRM_PANEL_LVDS=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_PANEL_ILITEK_IL9322=m
+CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m
+CONFIG_DRM_PANEL_JDI_LT070ME05000=m
+CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
+CONFIG_DRM_PANEL_LG_LG4573=m
+CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
+CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m
+CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=m
+CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m
+CONFIG_DRM_PANEL_SHARP_LS043T1LE01=m
+CONFIG_DRM_PANEL_SITRONIX_ST7789V=m
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+CONFIG_DRM_ANALOGIX_ANX78XX=m
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_LVDS_ENCODER=m
+CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
+CONFIG_DRM_SIL_SII8620=m
+CONFIG_DRM_SII902X=m
+CONFIG_DRM_SII9234=m
+CONFIG_DRM_TOSHIBA_TC358767=m
+CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_I2C_ADV7533=y
+CONFIG_DRM_I2C_ADV7511_CEC=y
+CONFIG_DRM_DW_HDMI=m
+CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
+CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
+CONFIG_DRM_DW_HDMI_CEC=m
+CONFIG_HSA_AMD=m
+CONFIG_DRM_ARCPGU=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_MXS=y
+CONFIG_DRM_MXSFB=m
+CONFIG_DRM_TINYDRM=m
+CONFIG_TINYDRM_MIPI_DBI=m
+CONFIG_TINYDRM_ILI9225=m
+CONFIG_TINYDRM_MI0283QT=m
+CONFIG_TINYDRM_REPAPER=m
+CONFIG_TINYDRM_ST7586=m
+CONFIG_TINYDRM_ST7735R=m
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+# CONFIG_DRM_LIB_RANDOM is not set
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_CMDLINE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FB_DDC is not set
+CONFIG_FB_BOOT_VESA_SUPPORT=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=m
+CONFIG_FB_SYS_COPYAREA=m
+CONFIG_FB_SYS_IMAGEBLIT=m
+# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=m
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+CONFIG_FB_BACKLIGHT=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_VESA=y
+CONFIG_FB_EFI=y
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_XEN_FBDEV_FRONTEND=m
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+CONFIG_FB_HYPERV=m
+CONFIG_FB_SIMPLE=y
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=m
+CONFIG_LCD_L4F00242T03=m
+CONFIG_LCD_LMS283GF05=m
+CONFIG_LCD_LTV350QV=m
+CONFIG_LCD_ILI922X=m
+CONFIG_LCD_ILI9320=m
+CONFIG_LCD_TDO24M=m
+CONFIG_LCD_VGG2432A4=m
+CONFIG_LCD_PLATFORM=m
+CONFIG_LCD_S6E63M0=m
+CONFIG_LCD_LD9040=m
+CONFIG_LCD_AMS369FG06=m
+CONFIG_LCD_LMS501KF03=m
+CONFIG_LCD_HX8357=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_LM3533=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_DA903X=m
+CONFIG_BACKLIGHT_DA9052=m
+CONFIG_BACKLIGHT_MAX8925=m
+CONFIG_BACKLIGHT_APPLE=m
+CONFIG_BACKLIGHT_PM8941_WLED=m
+CONFIG_BACKLIGHT_SAHARA=m
+CONFIG_BACKLIGHT_WM831X=m
+CONFIG_BACKLIGHT_ADP5520=m
+CONFIG_BACKLIGHT_ADP8860=m
+CONFIG_BACKLIGHT_ADP8870=m
+CONFIG_BACKLIGHT_88PM860X=m
+CONFIG_BACKLIGHT_PCF50633=m
+CONFIG_BACKLIGHT_AAT2870=m
+CONFIG_BACKLIGHT_LM3630A=m
+CONFIG_BACKLIGHT_LM3639=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_BACKLIGHT_LP8788=m
+CONFIG_BACKLIGHT_PANDORA=m
+CONFIG_BACKLIGHT_SKY81452=m
+CONFIG_BACKLIGHT_TPS65217=m
+CONFIG_BACKLIGHT_AS3711=m
+CONFIG_BACKLIGHT_GPIO=m
+CONFIG_BACKLIGHT_LV5207LP=m
+CONFIG_BACKLIGHT_BD6107=m
+CONFIG_BACKLIGHT_ARCXCNN=m
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEOMODE_HELPERS=y
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
+# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_PCM_ELD=y
+CONFIG_SND_PCM_IEC958=y
+CONFIG_SND_DMAENGINE_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_SEQ_DEVICE=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_COMPRESS_OFFLOAD=m
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_PCM_TIMER=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+# CONFIG_SND_DEBUG_VERBOSE is not set
+# CONFIG_SND_PCM_XRUN_DEBUG is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_DMA_SGBUF=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_SEQ_MIDI_EVENT=m
+CONFIG_SND_SEQ_MIDI=m
+CONFIG_SND_SEQ_MIDI_EMUL=m
+CONFIG_SND_SEQ_VIRMIDI=m
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_PCSP=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MTS64=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_PORTMAN2X4=m
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
+CONFIG_SND_SB_COMMON=m
+CONFIG_SND_PCI=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_ALS300=m
+CONFIG_SND_ALS4000=m
+CONFIG_SND_ALI5451=m
+CONFIG_SND_ASIHPI=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_AW2=m
+CONFIG_SND_AZT3328=m
+CONFIG_SND_BT87X=m
+# CONFIG_SND_BT87X_OVERCLOCK is not set
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN_LIB=m
+CONFIG_SND_OXYGEN=m
+CONFIG_SND_CS4281=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+CONFIG_SND_EMU10K1=m
+CONFIG_SND_EMU10K1_SEQ=m
+CONFIG_SND_EMU10K1X=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_ES1938=m
+CONFIG_SND_ES1968=m
+CONFIG_SND_ES1968_INPUT=y
+CONFIG_SND_ES1968_RADIO=y
+CONFIG_SND_FM801=m
+CONFIG_SND_FM801_TEA575X_BOOL=y
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_ICE1712=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_LOLA=m
+CONFIG_SND_LX6464ES=m
+CONFIG_SND_MAESTRO3=m
+CONFIG_SND_MAESTRO3_INPUT=y
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+CONFIG_SND_PCXHR=m
+CONFIG_SND_RIPTIDE=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_SONICVIBES=m
+CONFIG_SND_TRIDENT=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VIRTUOSO=m
+CONFIG_SND_VX222=m
+CONFIG_SND_YMFPCI=m
+
+#
+# HD-Audio
+#
+CONFIG_SND_HDA=m
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_RECONFIG=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_INPUT_BEEP_MODE=1
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_ANALOG=m
+CONFIG_SND_HDA_CODEC_SIGMATEL=m
+CONFIG_SND_HDA_CODEC_VIA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_CIRRUS=m
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+CONFIG_SND_HDA_CODEC_CA0110=m
+CONFIG_SND_HDA_CODEC_CA0132=m
+CONFIG_SND_HDA_CODEC_CA0132_DSP=y
+CONFIG_SND_HDA_CODEC_CMEDIA=m
+CONFIG_SND_HDA_CODEC_SI3054=m
+CONFIG_SND_HDA_GENERIC=m
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10
+CONFIG_SND_HDA_CORE=m
+CONFIG_SND_HDA_DSP_LOADER=y
+CONFIG_SND_HDA_I915=y
+CONFIG_SND_HDA_EXT_CORE=m
+CONFIG_SND_HDA_PREALLOC_SIZE=4096
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_USX2Y=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_US122L=m
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_USB_HIFACE=m
+CONFIG_SND_BCD2000=m
+CONFIG_SND_USB_LINE6=m
+CONFIG_SND_USB_POD=m
+CONFIG_SND_USB_PODHD=m
+CONFIG_SND_USB_TONEPORT=m
+CONFIG_SND_USB_VARIAX=m
+CONFIG_SND_FIREWIRE=y
+CONFIG_SND_FIREWIRE_LIB=m
+CONFIG_SND_DICE=m
+CONFIG_SND_OXFW=m
+CONFIG_SND_ISIGHT=m
+CONFIG_SND_FIREWORKS=m
+CONFIG_SND_BEBOB=m
+CONFIG_SND_FIREWIRE_DIGI00X=m
+CONFIG_SND_FIREWIRE_TASCAM=m
+CONFIG_SND_FIREWIRE_MOTU=m
+CONFIG_SND_FIREFACE=m
+CONFIG_SND_PCMCIA=y
+CONFIG_SND_VXPOCKET=m
+CONFIG_SND_PDAUDIOCF=m
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
+CONFIG_SND_SOC_COMPRESS=y
+CONFIG_SND_SOC_TOPOLOGY=y
+CONFIG_SND_SOC_ACPI=m
+CONFIG_SND_SOC_AMD_ACP=m
+CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
+CONFIG_SND_ATMEL_SOC=m
+CONFIG_SND_DESIGNWARE_I2S=m
+CONFIG_SND_DESIGNWARE_PCM=y
+
+#
+# SoC Audio for Freescale CPUs
+#
+
+#
+# Common SoC Audio options for Freescale CPUs:
+#
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+CONFIG_SND_I2S_HI6210_I2S=m
+CONFIG_SND_SOC_IMG=y
+CONFIG_SND_SOC_IMG_I2S_IN=m
+CONFIG_SND_SOC_IMG_I2S_OUT=m
+CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
+CONFIG_SND_SOC_IMG_SPDIF_IN=m
+CONFIG_SND_SOC_IMG_SPDIF_OUT=m
+CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
+CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
+CONFIG_SND_SST_IPC=m
+CONFIG_SND_SST_IPC_PCI=m
+CONFIG_SND_SST_IPC_ACPI=m
+CONFIG_SND_SOC_INTEL_SST_ACPI=m
+CONFIG_SND_SOC_INTEL_SST=m
+CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m
+CONFIG_SND_SOC_INTEL_HASWELL=m
+CONFIG_SND_SOC_INTEL_BAYTRAIL=m
+CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
+CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
+CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
+CONFIG_SND_SOC_INTEL_SKYLAKE=m
+CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
+CONFIG_SND_SOC_INTEL_MACH=y
+CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
+CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
+CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
+CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH=m
+CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH=m
+CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
+CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
+CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
+CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
+CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
+CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
+CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
+CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m
+CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
+CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
+CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
+CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
+CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
+
+#
+# STMicroelectronics STM32 SOC audio support
+#
+CONFIG_SND_SOC_XTFPGA_I2S=m
+CONFIG_ZX_TDM=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+
+#
+# CODEC drivers
+#
+CONFIG_SND_SOC_AC97_CODEC=m
+CONFIG_SND_SOC_ADAU_UTILS=m
+CONFIG_SND_SOC_ADAU1701=m
+CONFIG_SND_SOC_ADAU17X1=m
+CONFIG_SND_SOC_ADAU1761=m
+CONFIG_SND_SOC_ADAU1761_I2C=m
+CONFIG_SND_SOC_ADAU1761_SPI=m
+CONFIG_SND_SOC_ADAU7002=m
+CONFIG_SND_SOC_AK4104=m
+CONFIG_SND_SOC_AK4554=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_AK5386=m
+CONFIG_SND_SOC_ALC5623=m
+# CONFIG_SND_SOC_BT_SCO is not set
+CONFIG_SND_SOC_CS35L32=m
+CONFIG_SND_SOC_CS35L33=m
+CONFIG_SND_SOC_CS35L34=m
+CONFIG_SND_SOC_CS35L35=m
+CONFIG_SND_SOC_CS42L42=m
+CONFIG_SND_SOC_CS42L51=m
+CONFIG_SND_SOC_CS42L51_I2C=m
+CONFIG_SND_SOC_CS42L52=m
+CONFIG_SND_SOC_CS42L56=m
+CONFIG_SND_SOC_CS42L73=m
+CONFIG_SND_SOC_CS4265=m
+CONFIG_SND_SOC_CS4270=m
+CONFIG_SND_SOC_CS4271=m
+CONFIG_SND_SOC_CS4271_I2C=m
+CONFIG_SND_SOC_CS4271_SPI=m
+CONFIG_SND_SOC_CS42XX8=m
+CONFIG_SND_SOC_CS42XX8_I2C=m
+CONFIG_SND_SOC_CS43130=m
+CONFIG_SND_SOC_CS4349=m
+CONFIG_SND_SOC_CS53L30=m
+CONFIG_SND_SOC_DA7213=m
+CONFIG_SND_SOC_DA7219=m
+CONFIG_SND_SOC_DIO2125=m
+CONFIG_SND_SOC_DMIC=m
+CONFIG_SND_SOC_HDMI_CODEC=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8328=m
+CONFIG_SND_SOC_ES8328_I2C=m
+CONFIG_SND_SOC_ES8328_SPI=m
+CONFIG_SND_SOC_GTM601=m
+CONFIG_SND_SOC_HDAC_HDMI=m
+CONFIG_SND_SOC_INNO_RK3036=m
+CONFIG_SND_SOC_MAX98090=m
+CONFIG_SND_SOC_MAX98357A=m
+CONFIG_SND_SOC_MAX98504=m
+CONFIG_SND_SOC_MAX98927=m
+CONFIG_SND_SOC_MAX98373=m
+CONFIG_SND_SOC_MAX9860=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
+CONFIG_SND_SOC_PCM1681=m
+CONFIG_SND_SOC_PCM179X=m
+CONFIG_SND_SOC_PCM179X_I2C=m
+CONFIG_SND_SOC_PCM179X_SPI=m
+CONFIG_SND_SOC_PCM186X=m
+CONFIG_SND_SOC_PCM186X_I2C=m
+CONFIG_SND_SOC_PCM186X_SPI=m
+CONFIG_SND_SOC_PCM3168A=m
+CONFIG_SND_SOC_PCM3168A_I2C=m
+CONFIG_SND_SOC_PCM3168A_SPI=m
+CONFIG_SND_SOC_PCM512x=m
+CONFIG_SND_SOC_PCM512x_I2C=m
+CONFIG_SND_SOC_PCM512x_SPI=m
+CONFIG_SND_SOC_RL6231=m
+CONFIG_SND_SOC_RL6347A=m
+CONFIG_SND_SOC_RT286=m
+CONFIG_SND_SOC_RT298=m
+CONFIG_SND_SOC_RT5514=m
+CONFIG_SND_SOC_RT5514_SPI=m
+# CONFIG_SND_SOC_RT5514_SPI_BUILTIN is not set
+CONFIG_SND_SOC_RT5616=m
+CONFIG_SND_SOC_RT5631=m
+CONFIG_SND_SOC_RT5640=m
+CONFIG_SND_SOC_RT5645=m
+CONFIG_SND_SOC_RT5651=m
+CONFIG_SND_SOC_RT5663=m
+CONFIG_SND_SOC_RT5670=m
+CONFIG_SND_SOC_RT5677=m
+CONFIG_SND_SOC_RT5677_SPI=m
+CONFIG_SND_SOC_SGTL5000=m
+CONFIG_SND_SOC_SI476X=m
+CONFIG_SND_SOC_SIGMADSP=m
+CONFIG_SND_SOC_SIGMADSP_I2C=m
+CONFIG_SND_SOC_SIGMADSP_REGMAP=m
+CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
+CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_SSM2602=m
+CONFIG_SND_SOC_SSM2602_SPI=m
+CONFIG_SND_SOC_SSM2602_I2C=m
+CONFIG_SND_SOC_SSM4567=m
+CONFIG_SND_SOC_STA32X=m
+CONFIG_SND_SOC_STA350=m
+CONFIG_SND_SOC_STI_SAS=m
+CONFIG_SND_SOC_TAS2552=m
+CONFIG_SND_SOC_TAS5086=m
+CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_TAS5720=m
+CONFIG_SND_SOC_TAS6424=m
+CONFIG_SND_SOC_TFA9879=m
+CONFIG_SND_SOC_TLV320AIC23=m
+CONFIG_SND_SOC_TLV320AIC23_I2C=m
+CONFIG_SND_SOC_TLV320AIC23_SPI=m
+CONFIG_SND_SOC_TLV320AIC31XX=m
+CONFIG_SND_SOC_TLV320AIC32X4=m
+CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
+CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
+CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_TS3A227E=m
+CONFIG_SND_SOC_TSCS42XX=m
+CONFIG_SND_SOC_WM8510=m
+CONFIG_SND_SOC_WM8523=m
+CONFIG_SND_SOC_WM8524=m
+CONFIG_SND_SOC_WM8580=m
+CONFIG_SND_SOC_WM8711=m
+CONFIG_SND_SOC_WM8728=m
+CONFIG_SND_SOC_WM8731=m
+CONFIG_SND_SOC_WM8737=m
+CONFIG_SND_SOC_WM8741=m
+CONFIG_SND_SOC_WM8750=m
+CONFIG_SND_SOC_WM8753=m
+CONFIG_SND_SOC_WM8770=m
+CONFIG_SND_SOC_WM8776=m
+CONFIG_SND_SOC_WM8804=m
+CONFIG_SND_SOC_WM8804_I2C=m
+CONFIG_SND_SOC_WM8804_SPI=m
+CONFIG_SND_SOC_WM8903=m
+CONFIG_SND_SOC_WM8960=m
+CONFIG_SND_SOC_WM8962=m
+CONFIG_SND_SOC_WM8974=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SOC_WM8985=m
+CONFIG_SND_SOC_ZX_AUD96P22=m
+CONFIG_SND_SOC_NAU8540=m
+CONFIG_SND_SOC_NAU8810=m
+CONFIG_SND_SOC_NAU8824=m
+CONFIG_SND_SOC_NAU8825=m
+CONFIG_SND_SOC_TPA6130A2=m
+CONFIG_SND_SIMPLE_CARD_UTILS=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_AUDIO_GRAPH_SCU_CARD=m
+CONFIG_SND_X86=y
+CONFIG_HDMI_LPE_AUDIO=m
+CONFIG_SND_SYNTH_EMUX=m
+CONFIG_AC97_BUS=m
+
+#
+# HID support
+#
+CONFIG_HID=m
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=m
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+CONFIG_HID_ACCUTOUCH=m
+CONFIG_HID_ACRUX=m
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CMEDIA=m
+CONFIG_HID_CP2112=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+CONFIG_HID_HOLTEK=m
+CONFIG_HOLTEK_FF=y
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MAYFLASH=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PICOLCD_CIR=y
+CONFIG_HID_PLANTRONICS=m
+CONFIG_HID_PRIMAX=m
+CONFIG_HID_RETRODE=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_HYPERV_MOUSE=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_UDRAW_PS3=m
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=m
+# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set
+CONFIG_HID_ALPS=m
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+
+#
+# I2C HID support
+#
+CONFIG_I2C_HID=m
+
+#
+# Intel ISH HID support
+#
+CONFIG_INTEL_ISH_HID=m
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=m
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=m
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+CONFIG_USB_DYNAMIC_MINORS=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB=m
+CONFIG_USB_WUSB_CBAF=m
+# CONFIG_USB_WUSB_CBAF_DEBUG is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_C67X00_HCD=m
+CONFIG_USB_XHCI_HCD=m
+# CONFIG_USB_XHCI_DBGCAP is not set
+CONFIG_USB_XHCI_PCI=m
+CONFIG_USB_XHCI_PLATFORM=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=m
+CONFIG_USB_EHCI_HCD_PLATFORM=m
+CONFIG_USB_OXU210HP_HCD=m
+CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_ISP1362_HCD=m
+CONFIG_USB_FOTG210_HCD=m
+CONFIG_USB_MAX3421_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PCI=m
+# CONFIG_USB_OHCI_HCD_SSB is not set
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+# CONFIG_USB_SL811_HCD_ISO is not set
+CONFIG_USB_SL811_CS=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_WHCI_HCD=m
+CONFIG_USB_HWA_HCD=m
+CONFIG_USB_HCD_BCMA=m
+CONFIG_USB_HCD_SSB=m
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USBIP_CORE=m
+CONFIG_USBIP_VHCI_HCD=m
+CONFIG_USBIP_VHCI_HC_PORTS=8
+CONFIG_USBIP_VHCI_NR_HCS=1
+CONFIG_USBIP_HOST=m
+CONFIG_USBIP_VUDC=m
+# CONFIG_USBIP_DEBUG is not set
+CONFIG_USB_MUSB_HDRC=m
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_GADGET is not set
+CONFIG_USB_MUSB_DUAL_ROLE=y
+
+#
+# Platform Glue Layer
+#
+
+#
+# MUSB DMA mode
+#
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_DWC3=m
+CONFIG_USB_DWC3_ULPI=y
+# CONFIG_USB_DWC3_HOST is not set
+# CONFIG_USB_DWC3_GADGET is not set
+CONFIG_USB_DWC3_DUAL_ROLE=y
+
+#
+# Platform Glue Driver Support
+#
+CONFIG_USB_DWC3_PCI=m
+CONFIG_USB_DWC3_OF_SIMPLE=m
+CONFIG_USB_DWC2=m
+# CONFIG_USB_DWC2_HOST is not set
+
+#
+# Gadget/Dual-role mode requires USB Gadget support to be enabled
+#
+# CONFIG_USB_DWC2_PERIPHERAL is not set
+CONFIG_USB_DWC2_DUAL_ROLE=y
+CONFIG_USB_DWC2_PCI=m
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+CONFIG_USB_CHIPIDEA=m
+CONFIG_USB_CHIPIDEA_OF=m
+CONFIG_USB_CHIPIDEA_PCI=m
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=m
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_ISP1761_UDC=y
+# CONFIG_USB_ISP1760_HOST_ROLE is not set
+# CONFIG_USB_ISP1760_GADGET_ROLE is not set
+CONFIG_USB_ISP1760_DUAL_ROLE=y
+
+#
+# USB port drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_F81232=m
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_METRO=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+CONFIG_USB_SERIAL_WISHBONE=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_SISUSBVGA_CON=y
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_IOWARRIOR=m
+CONFIG_USB_TEST=m
+CONFIG_USB_EHSET_TEST_FIXTURE=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_YUREX=m
+CONFIG_USB_EZUSB_FX2=m
+CONFIG_USB_HUB_USB251XB=m
+CONFIG_USB_HSIC_USB3503=m
+CONFIG_USB_HSIC_USB4604=m
+CONFIG_USB_LINK_LAYER_TEST=m
+CONFIG_USB_CHAOSKEY=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=m
+CONFIG_USB_GPIO_VBUS=m
+CONFIG_TAHVO_USB=m
+# CONFIG_TAHVO_USB_HOST_BY_DEFAULT is not set
+CONFIG_USB_ISP1301=m
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+CONFIG_U_SERIAL_CONSOLE=y
+
+#
+# USB Peripheral Controller
+#
+CONFIG_USB_FOTG210_UDC=m
+CONFIG_USB_GR_UDC=m
+CONFIG_USB_R8A66597=m
+CONFIG_USB_PXA27X=m
+CONFIG_USB_MV_UDC=m
+CONFIG_USB_MV_U3D=m
+CONFIG_USB_SNP_CORE=m
+CONFIG_USB_SNP_UDC_PLAT=m
+CONFIG_USB_M66592=m
+CONFIG_USB_BDC_UDC=m
+
+#
+# Platform Support
+#
+CONFIG_USB_BDC_PCI=m
+CONFIG_USB_AMD5536UDC=m
+CONFIG_USB_NET2272=m
+CONFIG_USB_NET2272_DMA=y
+CONFIG_USB_NET2280=m
+CONFIG_USB_GOKU=m
+CONFIG_USB_EG20T=m
+CONFIG_USB_GADGET_XILINX=m
+CONFIG_USB_DUMMY_HCD=m
+CONFIG_USB_LIBCOMPOSITE=m
+CONFIG_USB_F_ACM=m
+CONFIG_USB_F_SS_LB=m
+CONFIG_USB_U_SERIAL=m
+CONFIG_USB_U_ETHER=m
+CONFIG_USB_U_AUDIO=m
+CONFIG_USB_F_SERIAL=m
+CONFIG_USB_F_OBEX=m
+CONFIG_USB_F_NCM=m
+CONFIG_USB_F_ECM=m
+CONFIG_USB_F_PHONET=m
+CONFIG_USB_F_EEM=m
+CONFIG_USB_F_SUBSET=m
+CONFIG_USB_F_RNDIS=m
+CONFIG_USB_F_MASS_STORAGE=m
+CONFIG_USB_F_FS=m
+CONFIG_USB_F_UAC1=m
+CONFIG_USB_F_UAC1_LEGACY=m
+CONFIG_USB_F_UAC2=m
+CONFIG_USB_F_UVC=m
+CONFIG_USB_F_MIDI=m
+CONFIG_USB_F_HID=m
+CONFIG_USB_F_PRINTER=m
+CONFIG_USB_F_TCM=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_PHONET=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_LB_SS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
+CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_USB_CONFIGFS_F_PRINTER=y
+CONFIG_USB_CONFIGFS_F_TCM=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_AUDIO=m
+# CONFIG_GADGET_UAC1 is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_G_NCM=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_ETH=y
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+CONFIG_USB_FUNCTIONFS_GENERIC=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_GADGET_TARGET=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_USB_G_PRINTER=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_NOKIA=m
+CONFIG_USB_G_ACM_MS=m
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+CONFIG_USB_G_HID=m
+CONFIG_USB_G_DBGP=m
+# CONFIG_USB_G_DBGP_PRINTK is not set
+CONFIG_USB_G_DBGP_SERIAL=y
+CONFIG_USB_G_WEBCAM=m
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_WCOVE=m
+CONFIG_TYPEC_UCSI=m
+CONFIG_UCSI_ACPI=m
+CONFIG_TYPEC_TPS6598X=m
+CONFIG_USB_LED_TRIG=y
+CONFIG_USB_ULPI_BUS=m
+CONFIG_UWB=m
+CONFIG_UWB_HWA=m
+CONFIG_UWB_WHCI=m
+CONFIG_UWB_I1480U=m
+CONFIG_MMC=m
+CONFIG_PWRSEQ_EMMC=m
+CONFIG_PWRSEQ_SD8787=m
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+CONFIG_MMC_TEST=m
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_SDHCI_OF_ARASAN=m
+CONFIG_MMC_SDHCI_OF_AT91=m
+CONFIG_MMC_SDHCI_CADENCE=m
+CONFIG_MMC_SDHCI_F_SDH30=m
+CONFIG_MMC_WBSD=m
+CONFIG_MMC_TIFM_SD=m
+CONFIG_MMC_SPI=m
+CONFIG_MMC_SDRICOH_CS=m
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+CONFIG_MMC_USDHI6ROL0=m
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+CONFIG_MMC_TOSHIBA_PCI=m
+CONFIG_MMC_MTK=m
+CONFIG_MMC_SDHCI_XENON=m
+CONFIG_MMC_SDHCI_OMAP=m
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+CONFIG_MS_BLOCK=m
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
+CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS_FLASH=m
+CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_88PM860X=m
+CONFIG_LEDS_AAT1290=m
+CONFIG_LEDS_APU=m
+CONFIG_LEDS_AS3645A=m
+CONFIG_LEDS_BCM6328=m
+CONFIG_LEDS_BCM6358=m
+CONFIG_LEDS_CPCAP=m
+CONFIG_LEDS_LM3530=m
+CONFIG_LEDS_LM3533=m
+CONFIG_LEDS_LM3642=m
+CONFIG_LEDS_LM3692X=m
+CONFIG_LEDS_MT6323=m
+CONFIG_LEDS_PCA9532=m
+CONFIG_LEDS_PCA9532_GPIO=y
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_LP3952=m
+CONFIG_LEDS_LP55XX_COMMON=m
+CONFIG_LEDS_LP5521=m
+CONFIG_LEDS_LP5523=m
+CONFIG_LEDS_LP5562=m
+CONFIG_LEDS_LP8501=m
+CONFIG_LEDS_LP8788=m
+CONFIG_LEDS_LP8860=m
+CONFIG_LEDS_CLEVO_MAIL=m
+CONFIG_LEDS_PCA955X=m
+CONFIG_LEDS_PCA955X_GPIO=y
+CONFIG_LEDS_PCA963X=m
+CONFIG_LEDS_WM831X_STATUS=m
+CONFIG_LEDS_WM8350=m
+CONFIG_LEDS_DA903X=m
+CONFIG_LEDS_DA9052=m
+CONFIG_LEDS_DAC124S085=m
+CONFIG_LEDS_PWM=m
+CONFIG_LEDS_REGULATOR=m
+CONFIG_LEDS_BD2802=m
+CONFIG_LEDS_INTEL_SS4200=m
+CONFIG_LEDS_LT3593=m
+CONFIG_LEDS_ADP5520=m
+CONFIG_LEDS_MC13783=m
+CONFIG_LEDS_TCA6507=m
+CONFIG_LEDS_TLC591XX=m
+CONFIG_LEDS_MAX77693=m
+CONFIG_LEDS_MAX8997=m
+CONFIG_LEDS_LM355x=m
+CONFIG_LEDS_MENF21BMC=m
+CONFIG_LEDS_KTD2692=m
+CONFIG_LEDS_IS31FL319X=m
+CONFIG_LEDS_IS31FL32XX=m
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+CONFIG_LEDS_BLINKM=m
+CONFIG_LEDS_MLXCPLD=m
+CONFIG_LEDS_USER=m
+CONFIG_LEDS_NIC78BX=m
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_MTD=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_ACTIVITY=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_NETDEV=m
+CONFIG_ACCESSIBILITY=y
+CONFIG_A11Y_BRAILLE_CONSOLE=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_MTHCA=m
+CONFIG_INFINIBAND_MTHCA_DEBUG=y
+CONFIG_INFINIBAND_QIB=m
+CONFIG_INFINIBAND_QIB_DCA=y
+CONFIG_INFINIBAND_CXGB3=m
+# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
+CONFIG_INFINIBAND_CXGB4=m
+CONFIG_INFINIBAND_I40IW=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+CONFIG_INFINIBAND_NES=m
+# CONFIG_INFINIBAND_NES_DEBUG is not set
+CONFIG_INFINIBAND_OCRDMA=m
+CONFIG_INFINIBAND_VMWARE_PVRDMA=m
+CONFIG_INFINIBAND_USNIC=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+CONFIG_INFINIBAND_OPA_VNIC=m
+CONFIG_INFINIBAND_RDMAVT=m
+CONFIG_RDMA_RXE=m
+CONFIG_INFINIBAND_HFI1=m
+# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
+# CONFIG_SDMA_VERBOSITY is not set
+CONFIG_INFINIBAND_QEDR=m
+CONFIG_INFINIBAND_BNXT_RE=m
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=m
+CONFIG_EDAC_GHES=y
+CONFIG_EDAC_AMD64=m
+# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
+CONFIG_EDAC_E752X=m
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_I3200=m
+CONFIG_EDAC_IE31200=m
+CONFIG_EDAC_X38=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_I7CORE=m
+CONFIG_EDAC_I5000=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I7300=m
+CONFIG_EDAC_SBRIDGE=m
+CONFIG_EDAC_SKX=m
+CONFIG_EDAC_PND2=m
+CONFIG_RTC_LIB=y
+CONFIG_RTC_MC146818_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_88PM860X=m
+CONFIG_RTC_DRV_88PM80X=m
+CONFIG_RTC_DRV_ABB5ZES3=m
+CONFIG_RTC_DRV_ABX80X=m
+CONFIG_RTC_DRV_AS3722=m
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_DS1307_HWMON=y
+CONFIG_RTC_DRV_DS1307_CENTURY=y
+CONFIG_RTC_DRV_DS1374=m
+CONFIG_RTC_DRV_DS1374_WDT=y
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_HYM8563=m
+CONFIG_RTC_DRV_LP8788=m
+CONFIG_RTC_DRV_MAX6900=m
+CONFIG_RTC_DRV_MAX8907=m
+CONFIG_RTC_DRV_MAX8925=m
+CONFIG_RTC_DRV_MAX8998=m
+CONFIG_RTC_DRV_MAX8997=m
+CONFIG_RTC_DRV_MAX77686=m
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
+CONFIG_RTC_DRV_PCF85063=m
+CONFIG_RTC_DRV_PCF85363=m
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+CONFIG_RTC_DRV_TWL4030=m
+CONFIG_RTC_DRV_PALMAS=m
+CONFIG_RTC_DRV_TPS6586X=m
+CONFIG_RTC_DRV_TPS65910=m
+CONFIG_RTC_DRV_TPS80031=m
+CONFIG_RTC_DRV_RC5T583=m
+CONFIG_RTC_DRV_S35390A=m
+CONFIG_RTC_DRV_FM3130=m
+CONFIG_RTC_DRV_RX8010=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_EM3027=m
+CONFIG_RTC_DRV_RV8803=m
+CONFIG_RTC_DRV_S5M=m
+
+#
+# SPI RTC drivers
+#
+CONFIG_RTC_DRV_M41T93=m
+CONFIG_RTC_DRV_M41T94=m
+CONFIG_RTC_DRV_DS1302=m
+CONFIG_RTC_DRV_DS1305=m
+CONFIG_RTC_DRV_DS1343=m
+CONFIG_RTC_DRV_DS1347=m
+CONFIG_RTC_DRV_DS1390=m
+CONFIG_RTC_DRV_MAX6916=m
+CONFIG_RTC_DRV_R9701=m
+CONFIG_RTC_DRV_RX4581=m
+CONFIG_RTC_DRV_RX6110=m
+CONFIG_RTC_DRV_RS5C348=m
+CONFIG_RTC_DRV_MAX6902=m
+CONFIG_RTC_DRV_PCF2123=m
+CONFIG_RTC_DRV_MCP795=m
+CONFIG_RTC_I2C_AND_SPI=y
+
+#
+# SPI and I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+CONFIG_RTC_DRV_PCF2127=m
+CONFIG_RTC_DRV_RV3029C2=m
+CONFIG_RTC_DRV_RV3029_HWMON=y
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=m
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1685_FAMILY=m
+CONFIG_RTC_DRV_DS1685=y
+# CONFIG_RTC_DRV_DS1689 is not set
+# CONFIG_RTC_DRV_DS17285 is not set
+# CONFIG_RTC_DRV_DS17485 is not set
+# CONFIG_RTC_DRV_DS17885 is not set
+# CONFIG_RTC_DS1685_PROC_REGS is not set
+# CONFIG_RTC_DS1685_SYSFS_REGS is not set
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_DA9052=m
+CONFIG_RTC_DRV_DA9055=m
+CONFIG_RTC_DRV_DA9063=m
+CONFIG_RTC_DRV_STK17TA8=m
+CONFIG_RTC_DRV_M48T86=m
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_BQ4802=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_V3020=m
+CONFIG_RTC_DRV_WM831X=m
+CONFIG_RTC_DRV_WM8350=m
+CONFIG_RTC_DRV_PCF50633=m
+CONFIG_RTC_DRV_AB3100=m
+CONFIG_RTC_DRV_ZYNQMP=m
+CONFIG_RTC_DRV_CROS_EC=m
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_FTRTC010=m
+CONFIG_RTC_DRV_PCAP=m
+CONFIG_RTC_DRV_MC13XXX=m
+CONFIG_RTC_DRV_SNVS=m
+CONFIG_RTC_DRV_MT6397=m
+CONFIG_RTC_DRV_R7301=m
+CONFIG_RTC_DRV_CPCAP=m
+
+#
+# HID Sensor RTC drivers
+#
+CONFIG_RTC_DRV_HID_SENSOR_TIME=m
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+CONFIG_ALTERA_MSGDMA=m
+CONFIG_FSL_EDMA=m
+CONFIG_INTEL_IDMA64=m
+CONFIG_INTEL_IOATDMA=m
+CONFIG_INTEL_MIC_X100_DMA=m
+CONFIG_QCOM_HIDMA_MGMT=m
+CONFIG_QCOM_HIDMA=m
+CONFIG_DW_DMAC_CORE=y
+CONFIG_DW_DMAC=y
+CONFIG_DW_DMAC_PCI=y
+CONFIG_HSU_DMA=y
+
+#
+# DMA Clients
+#
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_DMATEST is not set
+CONFIG_DMA_ENGINE_RAID=y
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+CONFIG_DCA=m
+CONFIG_AUXDISPLAY=y
+CONFIG_CHARLCD=m
+CONFIG_HD44780=m
+CONFIG_KS0108=m
+CONFIG_KS0108_PORT=0x378
+CONFIG_KS0108_DELAY=2
+CONFIG_CFAG12864B=m
+CONFIG_CFAG12864B_RATE=20
+CONFIG_IMG_ASCII_LCD=m
+CONFIG_HT16K33=m
+CONFIG_PANEL=m
+CONFIG_PANEL_PARPORT=0
+CONFIG_PANEL_PROFILE=5
+# CONFIG_PANEL_CHANGE_MESSAGE is not set
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_UIO_NETX=m
+CONFIG_UIO_PRUSS=m
+CONFIG_UIO_MF624=m
+CONFIG_UIO_HV_GENERIC=m
+CONFIG_VFIO_IOMMU_TYPE1=m
+CONFIG_VFIO_VIRQFD=m
+CONFIG_VFIO=m
+# CONFIG_VFIO_NOIOMMU is not set
+CONFIG_VFIO_PCI=m
+CONFIG_VFIO_PCI_VGA=y
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
+CONFIG_VFIO_PCI_IGD=y
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
+CONFIG_IRQ_BYPASS_MANAGER=m
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VBOXGUEST=m
+CONFIG_VIRTIO=m
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_HYPERV=m
+CONFIG_HYPERV_TSCPAGE=y
+CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_BALLOON=m
+
+#
+# Xen driver support
+#
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SELFBALLOONING=y
+CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
+CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_DEV_EVTCHN=m
+CONFIG_XEN_BACKEND=y
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XEN_GNTDEV=m
+CONFIG_XEN_GRANT_DEV_ALLOC=m
+CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_TMEM=m
+CONFIG_XEN_PCIDEV_BACKEND=m
+CONFIG_XEN_PVCALLS_FRONTEND=m
+CONFIG_XEN_PVCALLS_BACKEND=y
+CONFIG_XEN_SCSI_BACKEND=m
+CONFIG_XEN_PRIVCMD=m
+CONFIG_XEN_ACPI_PROCESSOR=m
+CONFIG_XEN_MCE_LOG=y
+CONFIG_XEN_HAVE_PVMMU=y
+CONFIG_XEN_EFI=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_ACPI=y
+CONFIG_XEN_SYMS=y
+CONFIG_XEN_HAVE_VPMU=y
+CONFIG_STAGING=y
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_ULTRA=y
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_TOIM3232_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+CONFIG_KINGSUN_DONGLE=m
+CONFIG_KSDAZZLE_DONGLE=m
+CONFIG_KS959_DONGLE=m
+
+#
+# FIR device drivers
+#
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_NSC_FIR=m
+CONFIG_WINBOND_FIR=m
+CONFIG_SMC_IRCC_FIR=m
+CONFIG_ALI_FIR=m
+CONFIG_VLSI_FIR=m
+CONFIG_VIA_FIR=m
+CONFIG_MCS_FIR=m
+# CONFIG_IPX is not set
+# CONFIG_NCP_FS is not set
+CONFIG_PRISM2_USB=m
+CONFIG_COMEDI=m
+# CONFIG_COMEDI_DEBUG is not set
+CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
+CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
+CONFIG_COMEDI_MISC_DRIVERS=y
+CONFIG_COMEDI_BOND=m
+CONFIG_COMEDI_TEST=m
+CONFIG_COMEDI_PARPORT=m
+CONFIG_COMEDI_SERIAL2002=m
+# CONFIG_COMEDI_ISA_DRIVERS is not set
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADDI_WATCHDOG=m
+CONFIG_COMEDI_ADDI_APCI_1032=m
+CONFIG_COMEDI_ADDI_APCI_1500=m
+CONFIG_COMEDI_ADDI_APCI_1516=m
+CONFIG_COMEDI_ADDI_APCI_1564=m
+CONFIG_COMEDI_ADDI_APCI_16XX=m
+CONFIG_COMEDI_ADDI_APCI_2032=m
+CONFIG_COMEDI_ADDI_APCI_2200=m
+CONFIG_COMEDI_ADDI_APCI_3120=m
+CONFIG_COMEDI_ADDI_APCI_3501=m
+CONFIG_COMEDI_ADDI_APCI_3XXX=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_AMPLC_DIO200_PCI=m
+CONFIG_COMEDI_AMPLC_PC236_PCI=m
+CONFIG_COMEDI_AMPLC_PC263_PCI=m
+CONFIG_COMEDI_AMPLC_PCI224=m
+CONFIG_COMEDI_AMPLC_PCI230=m
+CONFIG_COMEDI_CONTEC_PCI_DIO=m
+CONFIG_COMEDI_DAS08_PCI=m
+CONFIG_COMEDI_DT3000=m
+CONFIG_COMEDI_DYNA_PCI10XX=m
+CONFIG_COMEDI_GSC_HPDI=m
+CONFIG_COMEDI_MF6X4=m
+CONFIG_COMEDI_ICP_MULTI=m
+CONFIG_COMEDI_DAQBOARD2000=m
+CONFIG_COMEDI_JR3_PCI=m
+CONFIG_COMEDI_KE_COUNTER=m
+CONFIG_COMEDI_CB_PCIDAS64=m
+CONFIG_COMEDI_CB_PCIDAS=m
+CONFIG_COMEDI_CB_PCIDDA=m
+CONFIG_COMEDI_CB_PCIMDAS=m
+CONFIG_COMEDI_CB_PCIMDDA=m
+CONFIG_COMEDI_ME4000=m
+CONFIG_COMEDI_ME_DAQ=m
+CONFIG_COMEDI_NI_6527=m
+CONFIG_COMEDI_NI_65XX=m
+CONFIG_COMEDI_NI_660X=m
+CONFIG_COMEDI_NI_670X=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_COMEDI_RTD520=m
+CONFIG_COMEDI_S626=m
+CONFIG_COMEDI_MITE=m
+CONFIG_COMEDI_NI_TIOCMD=m
+CONFIG_COMEDI_PCMCIA_DRIVERS=m
+CONFIG_COMEDI_CB_DAS16_CS=m
+CONFIG_COMEDI_DAS08_CS=m
+CONFIG_COMEDI_NI_DAQ_700_CS=m
+CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
+CONFIG_COMEDI_NI_LABPC_CS=m
+CONFIG_COMEDI_NI_MIO_CS=m
+CONFIG_COMEDI_QUATECH_DAQP_CS=m
+CONFIG_COMEDI_USB_DRIVERS=m
+CONFIG_COMEDI_DT9812=m
+CONFIG_COMEDI_NI_USB6501=m
+CONFIG_COMEDI_USBDUX=m
+CONFIG_COMEDI_USBDUXFAST=m
+CONFIG_COMEDI_USBDUXSIGMA=m
+CONFIG_COMEDI_VMK80XX=m
+CONFIG_COMEDI_8254=m
+CONFIG_COMEDI_8255=m
+CONFIG_COMEDI_8255_SA=m
+CONFIG_COMEDI_KCOMEDILIB=m
+CONFIG_COMEDI_AMPLC_DIO200=m
+CONFIG_COMEDI_AMPLC_PC236=m
+CONFIG_COMEDI_DAS08=m
+CONFIG_COMEDI_NI_LABPC=m
+CONFIG_COMEDI_NI_TIO=m
+CONFIG_RTL8192U=m
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
+CONFIG_RTL8192E=m
+CONFIG_RTL8723BS=m
+CONFIG_R8712U=m
+CONFIG_R8188EU=m
+CONFIG_88EU_AP_MODE=y
+CONFIG_R8822BE=m
+CONFIG_RTLWIFI_DEBUG_ST=y
+CONFIG_RTS5208=m
+CONFIG_VT6655=m
+CONFIG_VT6656=m
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+CONFIG_ADIS16201=m
+CONFIG_ADIS16203=m
+CONFIG_ADIS16209=m
+CONFIG_ADIS16240=m
+
+#
+# Analog to digital converters
+#
+CONFIG_AD7606=m
+CONFIG_AD7606_IFACE_PARALLEL=m
+CONFIG_AD7606_IFACE_SPI=m
+CONFIG_AD7780=m
+CONFIG_AD7816=m
+CONFIG_AD7192=m
+CONFIG_AD7280=m
+
+#
+# Analog digital bi-direction converters
+#
+CONFIG_ADT7316=m
+CONFIG_ADT7316_SPI=m
+CONFIG_ADT7316_I2C=m
+
+#
+# Capacitance to digital converters
+#
+CONFIG_AD7150=m
+CONFIG_AD7152=m
+CONFIG_AD7746=m
+
+#
+# Direct Digital Synthesis
+#
+CONFIG_AD9832=m
+CONFIG_AD9834=m
+
+#
+# Digital gyroscope sensors
+#
+CONFIG_ADIS16060=m
+
+#
+# Network Analyzer, Impedance Converters
+#
+CONFIG_AD5933=m
+
+#
+# Light sensors
+#
+CONFIG_TSL2x7x=m
+
+#
+# Active energy metering IC
+#
+CONFIG_ADE7753=m
+CONFIG_ADE7754=m
+CONFIG_ADE7758=m
+CONFIG_ADE7759=m
+CONFIG_ADE7854=m
+CONFIG_ADE7854_I2C=m
+CONFIG_ADE7854_SPI=m
+
+#
+# Resolver to digital converters
+#
+CONFIG_AD2S90=m
+CONFIG_AD2S1200=m
+CONFIG_AD2S1210=m
+
+#
+# Triggers - standalone
+#
+CONFIG_FB_SM750=m
+CONFIG_FB_XGI=m
+
+#
+# Speakup console speech
+#
+CONFIG_SPEAKUP=m
+CONFIG_SPEAKUP_SYNTH_ACNTSA=m
+CONFIG_SPEAKUP_SYNTH_APOLLO=m
+CONFIG_SPEAKUP_SYNTH_AUDPTR=m
+CONFIG_SPEAKUP_SYNTH_BNS=m
+CONFIG_SPEAKUP_SYNTH_DECTLK=m
+CONFIG_SPEAKUP_SYNTH_DECEXT=m
+CONFIG_SPEAKUP_SYNTH_LTLK=m
+CONFIG_SPEAKUP_SYNTH_SOFT=m
+CONFIG_SPEAKUP_SYNTH_SPKOUT=m
+CONFIG_SPEAKUP_SYNTH_TXPRT=m
+CONFIG_SPEAKUP_SYNTH_DUMMY=m
+CONFIG_STAGING_MEDIA=y
+# CONFIG_INTEL_ATOMISP is not set
+CONFIG_I2C_BCM2048=m
+CONFIG_DVB_CXD2099=m
+
+#
+# Android
+#
+CONFIG_STAGING_BOARD=y
+CONFIG_LTE_GDM724X=m
+CONFIG_FIREWIRE_SERIAL=m
+CONFIG_FWTTY_MAX_TOTAL_PORTS=64
+CONFIG_FWTTY_MAX_CARD_PORTS=32
+CONFIG_MTD_SPINAND_MT29F=m
+CONFIG_MTD_SPINAND_ONDIEECC=y
+CONFIG_LNET=m
+CONFIG_LNET_MAX_PAYLOAD=1048576
+CONFIG_LNET_SELFTEST=m
+CONFIG_LNET_XPRT_IB=m
+CONFIG_LUSTRE_FS=m
+# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set
+CONFIG_DGNC=m
+CONFIG_GS_FPGABOOT=m
+CONFIG_CRYPTO_SKEIN=m
+CONFIG_UNISYSSPAR=y
+CONFIG_COMMON_CLK_XLNX_CLKWZRD=m
+# CONFIG_FB_TFT is not set
+CONFIG_WILC1000=m
+CONFIG_WILC1000_SDIO=m
+CONFIG_WILC1000_SPI=m
+# CONFIG_WILC1000_HW_OOB_INTR is not set
+CONFIG_MOST=m
+CONFIG_MOST_CDEV=m
+CONFIG_MOST_NET=m
+CONFIG_MOST_SOUND=m
+CONFIG_MOST_VIDEO=m
+CONFIG_MOST_DIM2=m
+CONFIG_MOST_I2C=m
+CONFIG_MOST_USB=m
+CONFIG_KS7010=m
+# CONFIG_GREYBUS is not set
+CONFIG_CRYPTO_DEV_CCREE=m
+
+#
+# USB Power Delivery and Type-C drivers
+#
+CONFIG_TYPEC_TCPCI=m
+CONFIG_DRM_VBOXVIDEO=m
+CONFIG_PI433=m
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ACER_WMI=m
+CONFIG_ACER_WIRELESS=m
+CONFIG_ACERHDF=m
+CONFIG_ALIENWARE_WMI=m
+CONFIG_ASUS_LAPTOP=m
+CONFIG_DELL_SMBIOS=m
+CONFIG_DELL_SMBIOS_WMI=y
+CONFIG_DELL_SMBIOS_SMM=y
+CONFIG_DELL_LAPTOP=m
+CONFIG_DELL_WMI=m
+CONFIG_DELL_WMI_DESCRIPTOR=m
+CONFIG_DELL_WMI_AIO=m
+CONFIG_DELL_WMI_LED=m
+CONFIG_DELL_SMO8800=m
+CONFIG_DELL_RBTN=m
+CONFIG_FUJITSU_LAPTOP=m
+CONFIG_FUJITSU_TABLET=m
+CONFIG_AMILO_RFKILL=m
+CONFIG_GPD_POCKET_FAN=m
+CONFIG_HP_ACCEL=m
+CONFIG_HP_WIRELESS=m
+CONFIG_HP_WMI=m
+CONFIG_MSI_LAPTOP=m
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_SONYPI_COMPAT=y
+CONFIG_IDEAPAD_LAPTOP=m
+CONFIG_SURFACE3_WMI=m
+CONFIG_THINKPAD_ACPI=m
+CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_SENSORS_HDAPS=m
+CONFIG_INTEL_MENLOW=m
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+CONFIG_EEEPC_WMI=m
+CONFIG_ASUS_WIRELESS=m
+CONFIG_ACPI_WMI=m
+CONFIG_WMI_BMOF=m
+CONFIG_INTEL_WMI_THUNDERBOLT=m
+CONFIG_MSI_WMI=m
+CONFIG_PEAQ_WMI=m
+CONFIG_TOPSTAR_LAPTOP=m
+CONFIG_ACPI_TOSHIBA=m
+CONFIG_TOSHIBA_BT_RFKILL=m
+CONFIG_TOSHIBA_HAPS=m
+CONFIG_TOSHIBA_WMI=m
+CONFIG_ACPI_CMPC=m
+CONFIG_INTEL_CHT_INT33FE=m
+CONFIG_INTEL_INT0002_VGPIO=m
+CONFIG_INTEL_HID_EVENT=m
+CONFIG_INTEL_VBTN=m
+CONFIG_INTEL_IPS=m
+CONFIG_INTEL_PMC_CORE=y
+CONFIG_IBM_RTL=m
+CONFIG_SAMSUNG_LAPTOP=m
+CONFIG_MXM_WMI=m
+CONFIG_INTEL_OAKTRAIL=m
+CONFIG_SAMSUNG_Q10=m
+CONFIG_APPLE_GMUX=m
+CONFIG_INTEL_RST=m
+CONFIG_INTEL_SMARTCONNECT=m
+CONFIG_PVPANIC=m
+CONFIG_INTEL_PMC_IPC=m
+CONFIG_INTEL_BXTWC_PMIC_TMU=m
+CONFIG_SURFACE_PRO3_BUTTON=m
+CONFIG_SURFACE_3_BUTTON=m
+CONFIG_INTEL_PUNIT_IPC=m
+CONFIG_INTEL_TELEMETRY=m
+CONFIG_MLX_PLATFORM=m
+CONFIG_INTEL_TURBO_MAX_3=y
+CONFIG_SILEAD_DMI=y
+CONFIG_INTEL_CHTDC_TI_PWRBTN=m
+CONFIG_PMC_ATOM=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_CHROMEOS_LAPTOP=m
+CONFIG_CHROMEOS_PSTORE=m
+CONFIG_CROS_EC_CTL=m
+CONFIG_CROS_EC_LPC=m
+CONFIG_CROS_EC_LPC_MEC=y
+CONFIG_CROS_EC_PROTO=y
+CONFIG_CROS_KBD_LED_BACKLIGHT=m
+CONFIG_MELLANOX_PLATFORM=y
+CONFIG_MLXREG_HOTPLUG=m
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+
+#
+# Common Clock Framework
+#
+CONFIG_COMMON_CLK_WM831X=m
+CONFIG_CLK_HSDK=y
+CONFIG_COMMON_CLK_MAX77686=m
+CONFIG_COMMON_CLK_RK808=m
+CONFIG_COMMON_CLK_SI5351=m
+CONFIG_COMMON_CLK_SI514=m
+CONFIG_COMMON_CLK_SI570=m
+CONFIG_COMMON_CLK_CDCE706=m
+CONFIG_COMMON_CLK_CDCE925=m
+CONFIG_COMMON_CLK_CS2000_CP=m
+CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_CLK_TWL6040=m
+# CONFIG_COMMON_CLK_NXP is not set
+CONFIG_COMMON_CLK_PALMAS=m
+CONFIG_COMMON_CLK_PWM=m
+# CONFIG_COMMON_CLK_PXA is not set
+# CONFIG_COMMON_CLK_PIC32 is not set
+CONFIG_COMMON_CLK_VC5=m
+CONFIG_HWSPINLOCK=y
+
+#
+# Clock Source drivers
+#
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_ATMEL_PIT is not set
+# CONFIG_SH_TIMER_CMT is not set
+# CONFIG_SH_TIMER_MTU2 is not set
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_EM_TIMER_STI is not set
+CONFIG_MAILBOX=y
+CONFIG_PLATFORM_MHU=m
+CONFIG_PCC=y
+CONFIG_ALTERA_MBOX=m
+CONFIG_MAILBOX_TEST=m
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+CONFIG_IOMMU_IOVA=y
+CONFIG_OF_IOMMU=y
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_V2=m
+CONFIG_DMAR_TABLE=y
+CONFIG_INTEL_IOMMU=y
+CONFIG_INTEL_IOMMU_SVM=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU_FLOPPY_WA=y
+CONFIG_IRQ_REMAP=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=m
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=m
+CONFIG_RPMSG_CHAR=m
+CONFIG_RPMSG_QCOM_GLINK_NATIVE=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=m
+CONFIG_RPMSG_VIRTIO=m
+CONFIG_SOUNDWIRE=y
+
+#
+# SoundWire Devices
+#
+CONFIG_SOUNDWIRE_BUS=m
+CONFIG_SOUNDWIRE_CADENCE=m
+CONFIG_SOUNDWIRE_INTEL=m
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+
+#
+# Broadcom SoC drivers
+#
+
+#
+# i.MX SoC drivers
+#
+
+#
+# Qualcomm SoC drivers
+#
+# CONFIG_SUNXI_SRAM is not set
+CONFIG_SOC_TI=y
+
+#
+# Xilinx SoC drivers
+#
+CONFIG_XILINX_VCU=m
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m
+CONFIG_DEVFREQ_GOV_PERFORMANCE=m
+CONFIG_DEVFREQ_GOV_POWERSAVE=m
+CONFIG_DEVFREQ_GOV_USERSPACE=m
+CONFIG_DEVFREQ_GOV_PASSIVE=m
+
+#
+# DEVFREQ Drivers
+#
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+CONFIG_EXTCON_ADC_JACK=m
+CONFIG_EXTCON_ARIZONA=m
+CONFIG_EXTCON_AXP288=m
+CONFIG_EXTCON_GPIO=m
+CONFIG_EXTCON_INTEL_INT3496=m
+CONFIG_EXTCON_INTEL_CHT_WC=m
+CONFIG_EXTCON_MAX14577=m
+CONFIG_EXTCON_MAX3355=m
+CONFIG_EXTCON_MAX77693=m
+CONFIG_EXTCON_MAX77843=m
+CONFIG_EXTCON_MAX8997=m
+CONFIG_EXTCON_PALMAS=m
+CONFIG_EXTCON_RT8973A=m
+CONFIG_EXTCON_SM5502=m
+CONFIG_EXTCON_USB_GPIO=m
+CONFIG_EXTCON_USBC_CROS_EC=m
+CONFIG_MEMORY=y
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_BUFFER_CB=m
+CONFIG_IIO_BUFFER_HW_CONSUMER=m
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+CONFIG_IIO_CONFIGFS=m
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+CONFIG_IIO_SW_DEVICE=m
+CONFIG_IIO_SW_TRIGGER=m
+CONFIG_IIO_TRIGGERED_EVENT=m
+
+#
+# Accelerometers
+#
+CONFIG_BMA180=m
+CONFIG_BMA220=m
+CONFIG_BMC150_ACCEL=m
+CONFIG_BMC150_ACCEL_I2C=m
+CONFIG_BMC150_ACCEL_SPI=m
+CONFIG_DA280=m
+CONFIG_DA311=m
+CONFIG_DMARD06=m
+CONFIG_DMARD09=m
+CONFIG_DMARD10=m
+CONFIG_HID_SENSOR_ACCEL_3D=m
+CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
+CONFIG_IIO_ST_ACCEL_3AXIS=m
+CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
+CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
+CONFIG_KXSD9=m
+CONFIG_KXSD9_SPI=m
+CONFIG_KXSD9_I2C=m
+CONFIG_KXCJK1013=m
+CONFIG_MC3230=m
+CONFIG_MMA7455=m
+CONFIG_MMA7455_I2C=m
+CONFIG_MMA7455_SPI=m
+CONFIG_MMA7660=m
+CONFIG_MMA8452=m
+CONFIG_MMA9551_CORE=m
+CONFIG_MMA9551=m
+CONFIG_MMA9553=m
+CONFIG_MXC4005=m
+CONFIG_MXC6255=m
+CONFIG_SCA3000=m
+CONFIG_STK8312=m
+CONFIG_STK8BA50=m
+
+#
+# Analog to digital converters
+#
+CONFIG_AD_SIGMA_DELTA=m
+CONFIG_AD7266=m
+CONFIG_AD7291=m
+CONFIG_AD7298=m
+CONFIG_AD7476=m
+CONFIG_AD7766=m
+CONFIG_AD7791=m
+CONFIG_AD7793=m
+CONFIG_AD7887=m
+CONFIG_AD7923=m
+CONFIG_AD799X=m
+CONFIG_AXP20X_ADC=m
+CONFIG_AXP288_ADC=m
+CONFIG_CC10001_ADC=m
+CONFIG_CPCAP_ADC=m
+CONFIG_DA9150_GPADC=m
+CONFIG_DLN2_ADC=m
+CONFIG_ENVELOPE_DETECTOR=m
+CONFIG_HI8435=m
+CONFIG_HX711=m
+CONFIG_INA2XX_ADC=m
+CONFIG_LP8788_ADC=m
+CONFIG_LTC2471=m
+CONFIG_LTC2485=m
+CONFIG_LTC2497=m
+CONFIG_MAX1027=m
+CONFIG_MAX11100=m
+CONFIG_MAX1118=m
+CONFIG_MAX1363=m
+CONFIG_MAX9611=m
+CONFIG_MCP320X=m
+CONFIG_MCP3422=m
+CONFIG_MEN_Z188_ADC=m
+CONFIG_NAU7802=m
+CONFIG_PALMAS_GPADC=m
+CONFIG_QCOM_VADC_COMMON=m
+CONFIG_QCOM_SPMI_IADC=m
+CONFIG_QCOM_SPMI_VADC=m
+CONFIG_SD_ADC_MODULATOR=m
+CONFIG_TI_ADC081C=m
+CONFIG_TI_ADC0832=m
+CONFIG_TI_ADC084S021=m
+CONFIG_TI_ADC12138=m
+CONFIG_TI_ADC108S102=m
+CONFIG_TI_ADC128S052=m
+CONFIG_TI_ADC161S626=m
+CONFIG_TI_ADS1015=m
+CONFIG_TI_ADS7950=m
+CONFIG_TI_ADS8688=m
+CONFIG_TI_AM335X_ADC=m
+CONFIG_TI_TLC4541=m
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL6030_GPADC=m
+CONFIG_VF610_ADC=m
+CONFIG_VIPERBOARD_ADC=m
+
+#
+# Amplifiers
+#
+CONFIG_AD8366=m
+
+#
+# Chemical Sensors
+#
+CONFIG_ATLAS_PH_SENSOR=m
+CONFIG_CCS811=m
+CONFIG_IAQCORE=m
+CONFIG_VZ89X=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+CONFIG_IIO_MS_SENSORS_I2C=m
+
+#
+# SSP Sensor Common
+#
+CONFIG_IIO_SSP_SENSORS_COMMONS=m
+CONFIG_IIO_SSP_SENSORHUB=m
+CONFIG_IIO_ST_SENSORS_I2C=m
+CONFIG_IIO_ST_SENSORS_SPI=m
+CONFIG_IIO_ST_SENSORS_CORE=m
+
+#
+# Counters
+#
+
+#
+# Digital to analog converters
+#
+CONFIG_AD5064=m
+CONFIG_AD5360=m
+CONFIG_AD5380=m
+CONFIG_AD5421=m
+CONFIG_AD5446=m
+CONFIG_AD5449=m
+CONFIG_AD5592R_BASE=m
+CONFIG_AD5592R=m
+CONFIG_AD5593R=m
+CONFIG_AD5504=m
+CONFIG_AD5624R_SPI=m
+CONFIG_LTC2632=m
+CONFIG_AD5686=m
+CONFIG_AD5755=m
+CONFIG_AD5761=m
+CONFIG_AD5764=m
+CONFIG_AD5791=m
+CONFIG_AD7303=m
+CONFIG_AD8801=m
+CONFIG_DPOT_DAC=m
+CONFIG_DS4424=m
+CONFIG_M62332=m
+CONFIG_MAX517=m
+CONFIG_MAX5821=m
+CONFIG_MCP4725=m
+CONFIG_MCP4922=m
+CONFIG_TI_DAC082S085=m
+CONFIG_VF610_DAC=m
+
+#
+# IIO dummy driver
+#
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+CONFIG_AD9523=m
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+CONFIG_ADF4350=m
+
+#
+# Digital gyroscope sensors
+#
+CONFIG_ADIS16080=m
+CONFIG_ADIS16130=m
+CONFIG_ADIS16136=m
+CONFIG_ADIS16260=m
+CONFIG_ADXRS450=m
+CONFIG_BMG160=m
+CONFIG_BMG160_I2C=m
+CONFIG_BMG160_SPI=m
+CONFIG_HID_SENSOR_GYRO_3D=m
+CONFIG_MPU3050=m
+CONFIG_MPU3050_I2C=m
+CONFIG_IIO_ST_GYRO_3AXIS=m
+CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
+CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
+CONFIG_ITG3200=m
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+CONFIG_AFE4403=m
+CONFIG_AFE4404=m
+CONFIG_MAX30100=m
+CONFIG_MAX30102=m
+
+#
+# Humidity sensors
+#
+CONFIG_AM2315=m
+CONFIG_DHT11=m
+CONFIG_HDC100X=m
+CONFIG_HID_SENSOR_HUMIDITY=m
+CONFIG_HTS221=m
+CONFIG_HTS221_I2C=m
+CONFIG_HTS221_SPI=m
+CONFIG_HTU21=m
+CONFIG_SI7005=m
+CONFIG_SI7020=m
+
+#
+# Inertial measurement units
+#
+CONFIG_ADIS16400=m
+CONFIG_ADIS16480=m
+CONFIG_BMI160=m
+CONFIG_BMI160_I2C=m
+CONFIG_BMI160_SPI=m
+CONFIG_KMX61=m
+CONFIG_INV_MPU6050_IIO=m
+CONFIG_INV_MPU6050_I2C=m
+CONFIG_INV_MPU6050_SPI=m
+CONFIG_IIO_ST_LSM6DSX=m
+CONFIG_IIO_ST_LSM6DSX_I2C=m
+CONFIG_IIO_ST_LSM6DSX_SPI=m
+CONFIG_IIO_ADIS_LIB=m
+CONFIG_IIO_ADIS_LIB_BUFFER=y
+
+#
+# Light sensors
+#
+CONFIG_ACPI_ALS=m
+CONFIG_ADJD_S311=m
+CONFIG_AL3320A=m
+CONFIG_APDS9300=m
+CONFIG_APDS9960=m
+CONFIG_BH1750=m
+CONFIG_BH1780=m
+CONFIG_CM32181=m
+CONFIG_CM3232=m
+CONFIG_CM3323=m
+CONFIG_CM3605=m
+CONFIG_CM36651=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_GP2AP020A00F=m
+CONFIG_SENSORS_ISL29018=m
+CONFIG_SENSORS_ISL29028=m
+CONFIG_ISL29125=m
+CONFIG_HID_SENSOR_ALS=m
+CONFIG_HID_SENSOR_PROX=m
+CONFIG_JSA1212=m
+CONFIG_RPR0521=m
+CONFIG_SENSORS_LM3533=m
+CONFIG_LTR501=m
+CONFIG_MAX44000=m
+CONFIG_OPT3001=m
+CONFIG_PA12203001=m
+CONFIG_SI1145=m
+CONFIG_STK3310=m
+CONFIG_ST_UVIS25=m
+CONFIG_ST_UVIS25_I2C=m
+CONFIG_ST_UVIS25_SPI=m
+CONFIG_TCS3414=m
+CONFIG_TCS3472=m
+CONFIG_SENSORS_TSL2563=m
+CONFIG_TSL2583=m
+CONFIG_TSL4531=m
+CONFIG_US5182D=m
+CONFIG_VCNL4000=m
+CONFIG_VEML6070=m
+CONFIG_VL6180=m
+CONFIG_ZOPT2201=m
+
+#
+# Magnetometer sensors
+#
+CONFIG_AK8974=m
+CONFIG_AK8975=m
+CONFIG_AK09911=m
+CONFIG_BMC150_MAGN=m
+CONFIG_BMC150_MAGN_I2C=m
+CONFIG_BMC150_MAGN_SPI=m
+CONFIG_MAG3110=m
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+CONFIG_MMC35240=m
+CONFIG_IIO_ST_MAGN_3AXIS=m
+CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
+CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
+CONFIG_SENSORS_HMC5843=m
+CONFIG_SENSORS_HMC5843_I2C=m
+CONFIG_SENSORS_HMC5843_SPI=m
+
+#
+# Multiplexers
+#
+CONFIG_IIO_MUX=m
+
+#
+# Inclinometer sensors
+#
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+
+#
+# Triggers - standalone
+#
+CONFIG_IIO_HRTIMER_TRIGGER=m
+CONFIG_IIO_INTERRUPT_TRIGGER=m
+CONFIG_IIO_TIGHTLOOP_TRIGGER=m
+CONFIG_IIO_SYSFS_TRIGGER=m
+
+#
+# Digital potentiometers
+#
+CONFIG_DS1803=m
+CONFIG_MAX5481=m
+CONFIG_MAX5487=m
+CONFIG_MCP4131=m
+CONFIG_MCP4531=m
+CONFIG_TPL0102=m
+
+#
+# Digital potentiostats
+#
+CONFIG_LMP91000=m
+
+#
+# Pressure sensors
+#
+CONFIG_ABP060MG=m
+CONFIG_BMP280=m
+CONFIG_BMP280_I2C=m
+CONFIG_BMP280_SPI=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_HID_SENSOR_PRESS=m
+CONFIG_HP03=m
+CONFIG_MPL115=m
+CONFIG_MPL115_I2C=m
+CONFIG_MPL115_SPI=m
+CONFIG_MPL3115=m
+CONFIG_MS5611=m
+CONFIG_MS5611_I2C=m
+CONFIG_MS5611_SPI=m
+CONFIG_MS5637=m
+CONFIG_IIO_ST_PRESS=m
+CONFIG_IIO_ST_PRESS_I2C=m
+CONFIG_IIO_ST_PRESS_SPI=m
+CONFIG_T5403=m
+CONFIG_HP206C=m
+CONFIG_ZPA2326=m
+CONFIG_ZPA2326_I2C=m
+CONFIG_ZPA2326_SPI=m
+
+#
+# Lightning sensors
+#
+CONFIG_AS3935=m
+
+#
+# Proximity and distance sensors
+#
+CONFIG_LIDAR_LITE_V2=m
+CONFIG_RFD77402=m
+CONFIG_SRF04=m
+CONFIG_SX9500=m
+CONFIG_SRF08=m
+
+#
+# Temperature sensors
+#
+CONFIG_MAXIM_THERMOCOUPLE=m
+CONFIG_HID_SENSOR_TEMP=m
+CONFIG_MLX90614=m
+CONFIG_TMP006=m
+CONFIG_TMP007=m
+CONFIG_TSYS01=m
+CONFIG_TSYS02D=m
+CONFIG_NTB=m
+CONFIG_NTB_AMD=m
+CONFIG_NTB_IDT=m
+CONFIG_NTB_INTEL=m
+CONFIG_NTB_SWITCHTEC=m
+# CONFIG_NTB_PINGPONG is not set
+# CONFIG_NTB_TOOL is not set
+# CONFIG_NTB_PERF is not set
+CONFIG_NTB_TRANSPORT=m
+CONFIG_VME_BUS=y
+
+#
+# VME Bridge Drivers
+#
+CONFIG_VME_CA91CX42=m
+CONFIG_VME_TSI148=m
+# CONFIG_VME_FAKE is not set
+
+#
+# VME Board Drivers
+#
+CONFIG_VMIVME_7805=m
+
+#
+# VME Device Drivers
+#
+CONFIG_VME_USER=m
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+CONFIG_PWM_ATMEL_HLCDC_PWM=m
+CONFIG_PWM_CRC=y
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_FSL_FTM=m
+CONFIG_PWM_LP3943=m
+CONFIG_PWM_LPSS=m
+CONFIG_PWM_LPSS_PCI=m
+CONFIG_PWM_LPSS_PLATFORM=m
+CONFIG_PWM_PCA9685=m
+CONFIG_PWM_STMPE=y
+CONFIG_PWM_TWL=m
+CONFIG_PWM_TWL_LED=m
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+CONFIG_ARM_GIC_MAX_NR=1
+# CONFIG_ARM_GIC_V3_ITS is not set
+CONFIG_IPACK_BUS=m
+CONFIG_BOARD_TPCI200=m
+CONFIG_SERIAL_IPOCTAL=m
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_ATH79 is not set
+# CONFIG_RESET_AXS10X is not set
+# CONFIG_RESET_BERLIN is not set
+# CONFIG_RESET_IMX7 is not set
+# CONFIG_RESET_LANTIQ is not set
+# CONFIG_RESET_LPC18XX is not set
+# CONFIG_RESET_MESON is not set
+# CONFIG_RESET_PISTACHIO is not set
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_SUNXI is not set
+CONFIG_RESET_TI_SYSCON=m
+# CONFIG_RESET_ZYNQ is not set
+# CONFIG_RESET_TEGRA_BPMP is not set
+CONFIG_FMC=m
+CONFIG_FMC_FAKEDEV=m
+CONFIG_FMC_TRIVIAL=m
+CONFIG_FMC_WRITE_EEPROM=m
+CONFIG_FMC_CHARDEV=m
+
+#
+# PHY Subsystem
+#
+CONFIG_GENERIC_PHY=y
+CONFIG_BCM_KONA_USB2_PHY=m
+CONFIG_PHY_PXA_28NM_HSIC=m
+CONFIG_PHY_PXA_28NM_USB2=m
+CONFIG_PHY_CPCAP_USB=m
+CONFIG_PHY_QCOM_USB_HS=m
+CONFIG_PHY_QCOM_USB_HSIC=m
+CONFIG_PHY_SAMSUNG_USB2=m
+# CONFIG_PHY_EXYNOS4210_USB2 is not set
+# CONFIG_PHY_EXYNOS4X12_USB2 is not set
+# CONFIG_PHY_EXYNOS5250_USB2 is not set
+CONFIG_PHY_TUSB1210=m
+CONFIG_POWERCAP=y
+CONFIG_INTEL_RAPL=m
+CONFIG_MCB=m
+CONFIG_MCB_PCI=m
+CONFIG_MCB_LPC=m
+
+#
+# Performance monitor support
+#
+CONFIG_RAS=y
+CONFIG_RAS_CEC=y
+CONFIG_THUNDERBOLT=m
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+CONFIG_LIBNVDIMM=y
+CONFIG_BLK_DEV_PMEM=m
+CONFIG_ND_BLK=m
+CONFIG_ND_CLAIM=y
+CONFIG_ND_BTT=m
+CONFIG_BTT=y
+CONFIG_ND_PFN=m
+CONFIG_NVDIMM_PFN=y
+CONFIG_NVDIMM_DAX=y
+CONFIG_DAX=y
+CONFIG_DEV_DAX=m
+CONFIG_DEV_DAX_PMEM=m
+CONFIG_NVMEM=y
+CONFIG_STM=m
+# CONFIG_STM_DUMMY is not set
+CONFIG_STM_SOURCE_CONSOLE=m
+CONFIG_STM_SOURCE_HEARTBEAT=m
+CONFIG_STM_SOURCE_FTRACE=m
+CONFIG_INTEL_TH=m
+CONFIG_INTEL_TH_PCI=m
+CONFIG_INTEL_TH_GTH=m
+CONFIG_INTEL_TH_STH=m
+CONFIG_INTEL_TH_MSU=m
+CONFIG_INTEL_TH_PTI=m
+# CONFIG_INTEL_TH_DEBUG is not set
+CONFIG_FPGA=m
+CONFIG_ALTERA_PR_IP_CORE=m
+CONFIG_ALTERA_PR_IP_CORE_PLAT=m
+CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
+CONFIG_FPGA_MGR_ALTERA_CVP=m
+CONFIG_FPGA_MGR_XILINX_SPI=m
+CONFIG_FPGA_MGR_ICE40_SPI=m
+CONFIG_FPGA_BRIDGE=m
+CONFIG_XILINX_PR_DECOUPLER=m
+CONFIG_FPGA_REGION=m
+CONFIG_OF_FPGA_REGION=m
+CONFIG_FSI=m
+CONFIG_FSI_MASTER_GPIO=m
+CONFIG_FSI_MASTER_HUB=m
+CONFIG_FSI_SCOM=m
+CONFIG_MULTIPLEXER=m
+
+#
+# Multiplexer drivers
+#
+CONFIG_MUX_ADG792A=m
+CONFIG_MUX_GPIO=m
+CONFIG_MUX_MMIO=m
+CONFIG_PM_OPP=y
+# CONFIG_UNISYS_VISORBUS is not set
+CONFIG_SIOX=m
+CONFIG_SIOX_BUS_GPIO=m
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_CTRL=m
+
+#
+# Firmware Drivers
+#
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
+CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=m
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+CONFIG_FW_CFG_SYSFS=m
+# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+# CONFIG_EFI_VARS is not set
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_RUNTIME_MAP=y
+# CONFIG_EFI_FAKE_MEMMAP is not set
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+CONFIG_EFI_CAPSULE_LOADER=m
+# CONFIG_EFI_TEST is not set
+CONFIG_APPLE_PROPERTIES=y
+CONFIG_RESET_ATTACK_MITIGATION=y
+CONFIG_UEFI_CPER=y
+CONFIG_EFI_DEV_PATH_PARSER=y
+
+#
+# Tegra firmware driver
+#
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_FS_IOMAP=y
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_USE_FOR_EXT2=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=m
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=m
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XFS_ONLINE_SCRUB=y
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+# CONFIG_OCFS2_DEBUG_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
+# CONFIG_BTRFS_ASSERT is not set
+# CONFIG_BTRFS_FS_REF_VERIFY is not set
+CONFIG_NILFS2_FS=m
+CONFIG_F2FS_FS=m
+CONFIG_F2FS_STAT_FS=y
+CONFIG_F2FS_FS_XATTR=y
+CONFIG_F2FS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_CHECK_FS=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+# CONFIG_F2FS_IO_TRACE is not set
+# CONFIG_F2FS_FAULT_INJECTION is not set
+CONFIG_FS_DAX=y
+CONFIG_FS_DAX_PMD=y
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_FS_ENCRYPTION=m
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_QUOTACTL=y
+CONFIG_QUOTACTL_COMPAT=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
+# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
+CONFIG_OVERLAY_FS_INDEX=y
+# CONFIG_OVERLAY_FS_NFS_EXPORT is not set
+
+#
+# Caches
+#
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_DEFAULT_UTF8=y
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+# CONFIG_PROC_VMCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_ORANGEFS_FS=m
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+# CONFIG_ECRYPT_FS_MESSAGING is not set
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HFSPLUS_FS_POSIX_ACL=y
+CONFIG_BEFS_FS=m
+# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=m
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_UBIFS_FS_ENCRYPTION=y
+CONFIG_UBIFS_FS_SECURITY=y
+CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+CONFIG_CRAMFS_MTD=y
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
+# CONFIG_SQUASHFS_DECOMP_MULTI is not set
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_ZLIB_COMPRESS=y
+# CONFIG_PSTORE_LZO_COMPRESS is not set
+# CONFIG_PSTORE_LZ4_COMPRESS is not set
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+CONFIG_EXOFS_FS=m
+# CONFIG_EXOFS_DEBUG is not set
+CONFIG_ORE=m
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
+CONFIG_NFS_V3=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="linux-libre.fsfla.org"
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_NFS_V4_SECURITY_LABEL=y
+CONFIG_NFS_FSCACHE=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_PNFS=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_NFSD_SCSILAYOUT=y
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+# CONFIG_NFSD_FAULT_INJECTION is not set
+CONFIG_GRACE_PERIOD=m
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_SUNRPC_SWAP=y
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_SUNRPC_XPRT_RDMA=m
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+# CONFIG_CIFS_STATS2 is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_ACL=y
+CONFIG_CIFS_DEBUG=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SMB311=y
+# CONFIG_CIFS_SMB_DIRECT is not set
+CONFIG_CIFS_FSCACHE=y
+CONFIG_CODA_FS=m
+CONFIG_AFS_FS=m
+# CONFIG_AFS_DEBUG is not set
+CONFIG_AFS_FSCACHE=y
+CONFIG_9P_FS=m
+CONFIG_9P_FSCACHE=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+# CONFIG_DLM_DEBUG is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+# CONFIG_BOOT_PRINTK_DELAY is not set
+CONFIG_DYNAMIC_DEBUG=y
+
+#
+# Compile-time checks and compiler options
+#
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+CONFIG_UNUSED_SYMBOLS=y
+# CONFIG_PAGE_OWNER is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_STACK_VALIDATION=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_DEBUG_KERNEL=y
+
+#
+# Memory Debugging
+#
+# CONFIG_PAGE_EXTENSION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_DEBUG_PAGE_REF is not set
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+# CONFIG_DEBUG_VIRTUAL is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_HAVE_ARCH_KASAN=y
+# CONFIG_KASAN is not set
+CONFIG_ARCH_HAS_KCOV=y
+# CONFIG_KCOV is not set
+# CONFIG_DEBUG_SHIRQ is not set
+
+#
+# Debug Lockups and Hangs
+#
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR_PERF=y
+CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_WQ_WATCHDOG is not set
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_ON_OOPS_VALUE=1
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHED_INFO=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_TIMEKEEPING is not set
+CONFIG_DEBUG_PREEMPT=y
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+# CONFIG_WW_MUTEX_SELFTEST is not set
+CONFIG_STACKTRACE=y
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_DEBUG_PI_LIST is not set
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU is not set
+# CONFIG_TORTURE_TEST is not set
+# CONFIG_RCU_PERF_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_FUNCTION_ERROR_INJECTION=y
+CONFIG_LATENCYTOP=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_FENTRY=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+# CONFIG_PREEMPTIRQ_EVENTS is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_HWLAT_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBE_EVENTS=y
+CONFIG_UPROBE_EVENTS=y
+CONFIG_BPF_EVENTS=y
+CONFIG_PROBE_EVENTS=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_BPF_KPROBE_OVERRIDE is not set
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+CONFIG_MMIOTRACE=y
+# CONFIG_HIST_TRIGGERS is not set
+# CONFIG_MMIOTRACE_TEST is not set
+# CONFIG_TRACEPOINT_BENCHMARK is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_TRACE_EVAL_MAP_FILE is not set
+CONFIG_TRACING_EVENTS_GPIO=y
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_DMA_API_DEBUG is not set
+CONFIG_RUNTIME_TESTING_MENU=y
+CONFIG_LKDTM=m
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_TEST_SORT is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_PERCPU_TEST is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_ASYNC_RAID6_TEST is not set
+# CONFIG_TEST_HEXDUMP is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_TEST_PRINTF is not set
+# CONFIG_TEST_BITMAP is not set
+# CONFIG_TEST_UUID is not set
+# CONFIG_TEST_RHASHTABLE is not set
+# CONFIG_TEST_HASH is not set
+# CONFIG_TEST_PARMAN is not set
+# CONFIG_TEST_LKM is not set
+# CONFIG_TEST_USER_COPY is not set
+# CONFIG_TEST_BPF is not set
+# CONFIG_FIND_BIT_BENCHMARK is not set
+# CONFIG_TEST_FIRMWARE is not set
+# CONFIG_TEST_SYSCTL is not set
+# CONFIG_TEST_UDELAY is not set
+# CONFIG_TEST_STATIC_KEYS is not set
+# CONFIG_TEST_KMOD is not set
+# CONFIG_MEMTEST is not set
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set
+# CONFIG_UBSAN is not set
+CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
+# CONFIG_X86_VERBOSE_BOOTUP is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_EARLY_PRINTK_EFI=y
+# CONFIG_EARLY_PRINTK_USB_XDBC is not set
+CONFIG_X86_PTDUMP_CORE=y
+# CONFIG_X86_PTDUMP is not set
+# CONFIG_EFI_PGT_DUMP is not set
+CONFIG_DEBUG_WX=y
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_DEBUG is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+# CONFIG_OPTIMIZE_INLINING is not set
+# CONFIG_DEBUG_ENTRY is not set
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+# CONFIG_X86_DEBUG_FPU is not set
+# CONFIG_PUNIT_ATOM_DEBUG is not set
+CONFIG_UNWINDER_ORC=y
+# CONFIG_UNWINDER_FRAME_POINTER is not set
+# CONFIG_UNWINDER_GUESS is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+CONFIG_KEYS_COMPAT=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
+CONFIG_TRUSTED_KEYS=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY_DMESG_RESTRICT=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY_TIOCSTI_RESTRICT=y
+CONFIG_SECURITY=y
+# CONFIG_SECURITY_WRITABLE_HOOKS is not set
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_PAGE_TABLE_ISOLATION=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
+# CONFIG_INTEL_TXT is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
+CONFIG_FORTIFY_SOURCE=y
+# CONFIG_FORTIFY_SOURCE_STRICT_STRING is not set
+CONFIG_PAGE_SANITIZE=y
+CONFIG_PAGE_SANITIZE_VERIFY=y
+# CONFIG_STATIC_USERMODEHELPER is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+# CONFIG_SECURITY_SELINUX_DISABLE is not set
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_LOADPIN is not set
+CONFIG_SECURITY_YAMA=y
+CONFIG_INTEGRITY=y
+# CONFIG_INTEGRITY_SIGNATURE is not set
+CONFIG_INTEGRITY_AUDIT=y
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_DH=y
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ABLK_HELPER=m
+CONFIG_CRYPTO_SIMD=m
+CONFIG_CRYPTO_GLUE_HELPER_X86=m
+CONFIG_CRYPTO_ENGINE=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRC32_PCLMUL=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_POLY1305=m
+CONFIG_CRYPTO_POLY1305_X86_64=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA1_SSSE3=m
+CONFIG_CRYPTO_SHA256_SSSE3=m
+CONFIG_CRYPTO_SHA512_SSSE3=m
+CONFIG_CRYPTO_SHA1_MB=m
+CONFIG_CRYPTO_SHA256_MB=m
+CONFIG_CRYPTO_SHA512_MB=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_AES_X86_64=m
+CONFIG_CRYPTO_AES_NI_INTEL=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_BLOWFISH_X86_64=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAMELLIA_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_CAST_COMMON=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST5_AVX_X86_64=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_CAST6_AVX_X86_64=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_DES3_EDE_X86_64=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SALSA20_X86_64=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_CHACHA20_X86_64=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_TWOFISH_X86_64=m
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_USER_API=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set
+CONFIG_CRYPTO_DEV_CCP=y
+CONFIG_CRYPTO_DEV_CCP_DD=m
+CONFIG_CRYPTO_DEV_SP_CCP=y
+CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
+CONFIG_CRYPTO_DEV_SP_PSP=y
+CONFIG_CRYPTO_DEV_QAT=m
+CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
+CONFIG_CRYPTO_DEV_QAT_C3XXX=m
+CONFIG_CRYPTO_DEV_QAT_C62X=m
+CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
+CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
+CONFIG_CRYPTO_DEV_QAT_C62XVF=m
+CONFIG_CRYPTO_DEV_NITROX=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_CHELSIO=m
+CONFIG_CHELSIO_IPSEC_INLINE=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+
+#
+# Certificates for signature checking
+#
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQFD=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_KVM_VFIO=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_KVM_COMPAT=y
+CONFIG_HAVE_KVM_IRQ_BYPASS=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_KVM_AMD=m
+CONFIG_KVM_AMD_SEV=y
+CONFIG_KVM_MMU_AUDIT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=m
+CONFIG_BITREVERSE=y
+# CONFIG_HAVE_ARCH_BITREVERSE is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+CONFIG_CRC4=m
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_CRC8=m
+CONFIG_XXHASH=m
+# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_842_COMPRESS=m
+CONFIG_842_DECOMPRESS=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_LZ4_COMPRESS=m
+CONFIG_LZ4HC_COMPRESS=m
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMPRESS=m
+CONFIG_ZSTD_DECOMPRESS=m
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_BCH=m
+CONFIG_BCH_CONST_PARAMS=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_BTREE=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_RADIX_TREE_MULTIORDER=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_SGL_ALLOC=y
+# CONFIG_DMA_DIRECT_OPS is not set
+CONFIG_DMA_VIRT_OPS=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_LRU_CACHE=m
+CONFIG_CLZ_TAB=y
+CONFIG_CORDIC=m
+CONFIG_DDR=y
+CONFIG_IRQ_POLL=y
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_FONT_SUPPORT=y
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_6x10 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+CONFIG_FONT_AUTOSELECT=y
+# CONFIG_SG_SPLIT is not set
+CONFIG_SG_POOL=y
+CONFIG_ARCH_HAS_SG_CHAIN=y
+CONFIG_ARCH_HAS_PMEM_API=y
+CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
+CONFIG_SBITMAP=y
+CONFIG_PARMAN=m
+# CONFIG_STRING_SELFTEST is not set
diff --git a/drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch b/drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch
new file mode 100644
index 0000000..6421206
--- /dev/null
+++ b/drm-i915-edp-Only-use-the-alternate-fixed-mode-if-its-asked-for.patch
@@ -0,0 +1,39 @@
+From e9b8250407ae73faa7ac543f7f260b4b2f34ebd8 Mon Sep 17 00:00:00 2001
+From: Jim Bride <jim.bride@linux.intel.com>
+Date: Mon, 6 Nov 2017 13:38:57 -0800
+Subject: [PATCH] drm/i915/edp: Only use the alternate fixed mode if it's asked
+ for
+
+In commit dc911f5bd8aa ("drm/i915/edp: Allow alternate fixed mode for
+eDP if available."), the patch allows for the use of an alternate fixed
+mode if it is available, but the patch was not ensuring that the only
+time the alternate mode is used is when it is specifically requested.
+This patch adds an additional comparison to intel_edp_compare_alt_mode
+to ensure that we only use the alternate mode if it is directly
+requested.
+
+Fixes: dc911f5bd8aac ("Allow alternate fixed mode for eDP if available.")
+Cc: David Weinehall <david.weinehall@linux.intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Jim Bride <jim.bride@linux.intel.com>
+---
+ drivers/gpu/drm/i915/intel_dp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d27c014..8164c59 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1621,7 +1621,8 @@ static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
+ m1->vdisplay == m2->vdisplay &&
+ m1->vsync_start == m2->vsync_start &&
+ m1->vsync_end == m2->vsync_end &&
+- m1->vtotal == m2->vtotal);
++ m1->vtotal == m2->vtotal &&
++ m1->vrefresh == m2->vrefresh);
+ return bres;
+ }
+
+--
+2.7.4
+
diff --git a/fix-vboxguest-on-guests-with-more-than-4G-RAM.patch b/fix-vboxguest-on-guests-with-more-than-4G-RAM.patch
new file mode 100644
index 0000000..2869ff8
--- /dev/null
+++ b/fix-vboxguest-on-guests-with-more-than-4G-RAM.patch
@@ -0,0 +1,549 @@
+From a117a2995f291e765c5de06f42f02a1687ecb55e Mon Sep 17 00:00:00 2001
+From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com>
+Date: Wed, 11 Apr 2018 21:27:44 +0200
+Subject: Fix vboxguest on guests with more than 4G RAM
+
+Squashed commit of the following:
+
+commit 042b191f6b98165d6bcca3ae09a0f9b289d6155e
+Author: Hans de Goede <hdegoede@redhat.com>
+Date: Thu Mar 29 17:28:57 2018 +0200
+
+ virt: vbox: Log an error when we fail to get the host version
+
+ This was the only error path during probe without a message being logged
+ about what went wrong, this fixes this.
+
+ Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+
+commit e4111a6c617687f7cb414ddfa8176206910db76e
+Author: Hans de Goede <hdegoede@redhat.com>
+Date: Thu Mar 29 17:28:56 2018 +0200
+
+ virt: vbox: Use __get_free_pages instead of kmalloc for DMA32 memory
+
+ It is not possible to get DMA32 zone memory through kmalloc, causing
+ the vboxguest driver to malfunction due to getting memory above
+ 4G which the PCI device cannot handle.
+
+ This commit changes the kmalloc calls where the 4G limit matters to
+ using __get_free_pages() fixing vboxguest not working on x86_64 guests
+    with more than 4G RAM.
+
+ Cc: stable@vger.kernel.org
+ Reported-by: Eloy Coto Pereiro <eloy.coto@gmail.com>
+ Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 2cb20368ce32e7275a351eadadd4c8f3da742a28
+Author: Hans de Goede <hdegoede@redhat.com>
+Date: Thu Mar 29 17:28:55 2018 +0200
+
+ virt: vbox: Add vbg_req_free() helper function
+
+ This is a preparation patch for fixing issues on x86_64 virtual-machines
+    with more than 4G of RAM. At the moment we pass __GFP_DMA32 to kmalloc, but kmalloc
+ does not honor that, so we need to switch to get_pages, which means we
+ will not be able to use kfree to free memory allocated with vbg_alloc_req.
+
+ While at it also remove a comment on a vbg_alloc_req call which talks
+ about Windows (inherited from the vbox upstream cross-platform code).
+
+ Cc: stable@vger.kernel.org
+ Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+
+commit fa5c012bc9c3e1ada5cde0bfa3c6706be97b7cb0
+Author: Hans de Goede <hdegoede@redhat.com>
+Date: Thu Mar 29 17:28:54 2018 +0200
+
+ virt: vbox: Move declarations of vboxguest private functions to private header
+
+ Move the declarations of functions from vboxguest_utils.c which are only
+ meant for vboxguest internal use from include/linux/vbox_utils.h to
+ drivers/virt/vboxguest/vboxguest_core.h.
+
+ Cc: stable@vger.kernel.org
+ Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 70 +++++++++++++-----------
+ drivers/virt/vboxguest/vboxguest_core.h | 9 +++
+ drivers/virt/vboxguest/vboxguest_linux.c | 19 ++++++-
+ drivers/virt/vboxguest/vboxguest_utils.c | 17 ++++--
+ include/linux/vbox_utils.h | 23 --------
+ 5 files changed, 76 insertions(+), 62 deletions(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 190dbf8cfcb5..2f3856a95856 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
+ }
+
+ out:
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ kfree(pages);
+ }
+
+@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
+
+ rc = vbg_req_perform(gdev, req);
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+
+ if (rc < 0) {
+ vbg_err("%s error: %d\n", __func__, rc);
+@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
+ ret = vbg_status_code_to_errno(rc);
+
+ out_free:
+- kfree(req2);
+- kfree(req1);
++ vbg_req_free(req2, sizeof(*req2));
++ vbg_req_free(req1, sizeof(*req1));
+ return ret;
+ }
+
+@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
+ if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
+ rc = VINF_SUCCESS;
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+
+ return vbg_status_code_to_errno(rc);
+ }
+@@ -431,58 +431,52 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
+ rc = vbg_req_perform(gdev, req);
+ do_div(req->interval_ns, 1000000); /* ns -> ms */
+ gdev->heartbeat_interval_ms = req->interval_ns;
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+
+ return vbg_status_code_to_errno(rc);
+ }
+
+ /**
+ * Initializes the heartbeat timer. This feature may be disabled by the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+ static int vbg_heartbeat_init(struct vbg_dev *gdev)
+ {
+ int ret;
+
+ /* Make sure that heartbeat checking is disabled if we fail. */
+ ret = vbg_heartbeat_host_config(gdev, false);
+ if (ret < 0)
+ return ret;
+
+ ret = vbg_heartbeat_host_config(gdev, true);
+ if (ret < 0)
+ return ret;
+
+- /*
+- * Preallocate the request to use it from the timer callback because:
+- * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+- * and the timer callback runs at DISPATCH_LEVEL;
+- * 2) avoid repeated allocations.
+- */
+ gdev->guest_heartbeat_req = vbg_req_alloc(
+ sizeof(*gdev->guest_heartbeat_req),
+ VMMDEVREQ_GUEST_HEARTBEAT);
+ if (!gdev->guest_heartbeat_req)
+ return -ENOMEM;
+
+ vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
+ __func__, gdev->heartbeat_interval_ms);
+ mod_timer(&gdev->heartbeat_timer, 0);
+
+ return 0;
+ }
+
+ /**
+ * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking.
+ * @gdev: The Guest extension device.
+ */
+ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
+ {
+ del_timer_sync(&gdev->heartbeat_timer);
+ vbg_heartbeat_host_config(gdev, false);
+- kfree(gdev->guest_heartbeat_req);
+-
++ vbg_req_free(gdev->guest_heartbeat_req,
++ sizeof(*gdev->guest_heartbeat_req));
+ }
+
+ /**
+@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ return vbg_status_code_to_errno(rc);
+ }
+
+@@ -617,32 +611,32 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
+
+ out:
+ mutex_unlock(&gdev->session_mutex);
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+
+ return ret;
+ }
+
+ /**
+ * Init and termination worker for set guest capabilities to zero on the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+ {
+ struct vmmdev_mask *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ if (!req)
+ return -ENOMEM;
+
+ req->not_mask = U32_MAX;
+ req->or_mask = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ return vbg_status_code_to_errno(rc);
+ }
+
+@@ -712,44 +706,46 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+
+ out:
+ mutex_unlock(&gdev->session_mutex);
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+
+ return ret;
+ }
+
+ /**
+ * vbg_query_host_version get the host feature mask and version information.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+ static int vbg_query_host_version(struct vbg_dev *gdev)
+ {
+ struct vmmdev_host_version *req;
+ int rc, ret;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ if (!req)
+ return -ENOMEM;
+
+ rc = vbg_req_perform(gdev, req);
+ ret = vbg_status_code_to_errno(rc);
+- if (ret)
++ if (ret) {
++ vbg_err("%s error: %d\n", __func__, rc);
+ goto out;
++ }
+
+ snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
+ req->major, req->minor, req->build, req->revision);
+ gdev->host_features = req->features;
+
+ vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
+ gdev->host_features);
+
+ if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+ vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+ ret = -ENODEV;
+ }
+
+ out:
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ return ret;
+ }
+
+@@ -847,36 +843,46 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+ return 0;
+
+ err_free_reqs:
+- kfree(gdev->mouse_status_req);
+- kfree(gdev->ack_events_req);
+- kfree(gdev->cancel_req);
+- kfree(gdev->mem_balloon.change_req);
+- kfree(gdev->mem_balloon.get_req);
++ vbg_req_free(gdev->mouse_status_req,
++ sizeof(*gdev->mouse_status_req));
++ vbg_req_free(gdev->ack_events_req,
++ sizeof(*gdev->ack_events_req));
++ vbg_req_free(gdev->cancel_req,
++ sizeof(*gdev->cancel_req));
++ vbg_req_free(gdev->mem_balloon.change_req,
++ sizeof(*gdev->mem_balloon.change_req));
++ vbg_req_free(gdev->mem_balloon.get_req,
++ sizeof(*gdev->mem_balloon.get_req));
+ return ret;
+ }
+
+ /**
+ * Call this on exit to clean-up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is loaded,
+ * but don't call this on shutdown.
+ * @gdev: The Guest extension device.
+ */
+ void vbg_core_exit(struct vbg_dev *gdev)
+ {
+ vbg_heartbeat_exit(gdev);
+ vbg_guest_mappings_exit(gdev);
+
+ /* Clear the host flags (mouse status etc). */
+ vbg_reset_host_event_filter(gdev, 0);
+ vbg_reset_host_capabilities(gdev);
+ vbg_core_set_mouse_status(gdev, 0);
+
+- kfree(gdev->mouse_status_req);
+- kfree(gdev->ack_events_req);
+- kfree(gdev->cancel_req);
+- kfree(gdev->mem_balloon.change_req);
+- kfree(gdev->mem_balloon.get_req);
++ vbg_req_free(gdev->mouse_status_req,
++ sizeof(*gdev->mouse_status_req));
++ vbg_req_free(gdev->ack_events_req,
++ sizeof(*gdev->ack_events_req));
++ vbg_req_free(gdev->cancel_req,
++ sizeof(*gdev->cancel_req));
++ vbg_req_free(gdev->mem_balloon.change_req,
++ sizeof(*gdev->mem_balloon.change_req));
++ vbg_req_free(gdev->mem_balloon.get_req,
++ sizeof(*gdev->mem_balloon.get_req));
+ }
+
+ /**
+@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ req->flags = dump->u.in.flags;
+ dump->hdr.rc = vbg_req_perform(gdev, req);
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ return 0;
+ }
+
+@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+- kfree(req);
++ vbg_req_free(req, sizeof(*req));
+ return vbg_status_code_to_errno(rc);
+ }
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
+index 6c784bf4fa6d..7ad9ec45bfa9 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.h
++++ b/drivers/virt/vboxguest/vboxguest_core.h
+@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+ void vbg_linux_mouse_event(struct vbg_dev *gdev);
+
++/* Private (non exported) functions from vboxguest_utils.c */
++void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
++void vbg_req_free(void *req, size_t len);
++int vbg_req_perform(struct vbg_dev *gdev, void *req);
++int vbg_hgcm_call32(
++ struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
++ struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
++ int *vbox_status);
++
+ #endif
+diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
+index 82e280d38cc2..398d22693234 100644
+--- a/drivers/virt/vboxguest/vboxguest_linux.c
++++ b/drivers/virt/vboxguest/vboxguest_linux.c
+@@ -87,52 +87,65 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ struct vbg_session *session = filp->private_data;
+ size_t returned_size, size;
+ struct vbg_ioctl_hdr hdr;
++ bool is_vmmdev_req;
+ int ret = 0;
+ void *buf;
+
+ if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (hdr.version != VBG_IOCTL_HDR_VERSION)
+ return -EINVAL;
+
+ if (hdr.size_in < sizeof(hdr) ||
+ (hdr.size_out && hdr.size_out < sizeof(hdr)))
+ return -EINVAL;
+
+ size = max(hdr.size_in, hdr.size_out);
+ if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
+ return -EINVAL;
+ if (size > SZ_16M)
+ return -E2BIG;
+
+- /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
+- buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
++ /*
++ * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
++ * the need for a bounce-buffer and another copy later on.
++ */
++ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
++
++ if (is_vmmdev_req)
++ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
++ else
++ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, (void *)arg, hdr.size_in)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (hdr.size_in < size)
+ memset(buf + hdr.size_in, 0, size - hdr.size_in);
+
+ ret = vbg_core_ioctl(session, req, buf);
+ if (ret)
+ goto out;
+
+ returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
+ if (returned_size > size) {
+ vbg_debug("%s: too much output data %zu > %zu\n",
+ __func__, returned_size, size);
+ returned_size = size;
+ }
+ if (copy_to_user((void *)arg, buf, returned_size) != 0)
+ ret = -EFAULT;
+
+ out:
+- kfree(buf);
++ if (is_vmmdev_req)
++ vbg_req_free(buf, size);
++ else
++ kfree(buf);
+
+ return ret;
+ }
+diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
+index 0f0dab8023cf..bf4474214b4d 100644
+--- a/drivers/virt/vboxguest/vboxguest_utils.c
++++ b/drivers/virt/vboxguest/vboxguest_utils.c
+@@ -65,23 +65,32 @@ VBG_LOG(vbg_debug, pr_debug);
+ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+ {
+ struct vmmdev_request_header *req;
++ int order = get_order(PAGE_ALIGN(len));
+
+- req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
++ req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
+ if (!req)
+ return NULL;
+
+ memset(req, 0xaa, len);
+
+ req->size = len;
+ req->version = VMMDEV_REQUEST_HEADER_VERSION;
+ req->request_type = req_type;
+ req->rc = VERR_GENERAL_FAILURE;
+ req->reserved1 = 0;
+ req->reserved2 = 0;
+
+ return req;
+ }
+
++void vbg_req_free(void *req, size_t len)
++{
++ if (!req)
++ return;
++
++ free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
++}
++
+ /* Note this function returns a VBox status code, not a negative errno!! */
+ int vbg_req_perform(struct vbg_dev *gdev, void *req)
+ {
+@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
+ rc = hgcm_connect->header.result;
+ }
+
+- kfree(hgcm_connect);
++ vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
+
+ *vbox_status = rc;
+ return 0;
+@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+ if (rc >= 0)
+ rc = hgcm_disconnect->header.result;
+
+- kfree(hgcm_disconnect);
++ vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
+
+ *vbox_status = rc;
+ return 0;
+@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ }
+
+ if (!leak_it)
+- kfree(call);
++ vbg_req_free(call, size);
+
+ free_bounce_bufs:
+ if (bounce_bufs) {
+diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
+index c71def6b310f..a240ed2a0372 100644
+--- a/include/linux/vbox_utils.h
++++ b/include/linux/vbox_utils.h
+@@ -24,39 +24,16 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
+ #define vbg_debug pr_debug
+ #endif
+
+-/**
+- * Allocate memory for generic request and initialize the request header.
+- *
+- * Return: the allocated memory
+- * @len: Size of memory block required for the request.
+- * @req_type: The generic request type.
+- */
+-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+-
+-/**
+- * Perform a generic request.
+- *
+- * Return: VBox status code
+- * @gdev: The Guest extension device.
+- * @req: Pointer to the request structure.
+- */
+-int vbg_req_perform(struct vbg_dev *gdev, void *req);
+-
+ int vbg_hgcm_connect(struct vbg_dev *gdev,
+ struct vmmdev_hgcm_service_location *loc,
+ u32 *client_id, int *vbox_status);
+
+ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+
+ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+ u32 parm_count, int *vbox_status);
+
+-int vbg_hgcm_call32(
+- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+- int *vbox_status);
+-
+ /**
+ * Convert a VirtualBox status code to a standard Linux kernel return value.
+ * Return: 0 or negative errno value.
+--
+2.17.0
+
diff --git a/linux.install b/linux.install
new file mode 100644
index 0000000..a20b44c
--- /dev/null
+++ b/linux.install
@@ -0,0 +1,11 @@
+post_upgrade() {
+ if findmnt --fstab -uno SOURCE /boot &>/dev/null && ! mountpoint -q /boot; then
+ echo "WARNING: /boot appears to be a separate partition but is not mounted."
+ fi
+
+}
+
+post_remove() {
+ rm -f boot/initramfs-%PKGBASE%.img
+ rm -f boot/initramfs-%PKGBASE%-fallback.img
+}
diff --git a/linux.preset b/linux.preset
new file mode 100644
index 0000000..66709a8
--- /dev/null
+++ b/linux.preset
@@ -0,0 +1,14 @@
+# mkinitcpio preset file for the '%PKGBASE%' package
+
+ALL_config="/etc/mkinitcpio.conf"
+ALL_kver="/boot/vmlinuz-%PKGBASE%"
+
+PRESETS=('default' 'fallback')
+
+#default_config="/etc/mkinitcpio.conf"
+default_image="/boot/initramfs-%PKGBASE%.img"
+#default_options=""
+
+#fallback_config="/etc/mkinitcpio.conf"
+fallback_image="/boot/initramfs-%PKGBASE%-fallback.img"
+fallback_options="-S autodetect"
diff --git a/patch-4.16-ck1.patch b/patch-4.16-ck1.patch
new file mode 100644
index 0000000..8e8129b
--- /dev/null
+++ b/patch-4.16-ck1.patch
@@ -0,0 +1,12161 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 1d1d53f85ddd..44a34daedc8a 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3812,6 +3812,14 @@
+ Memory area to be used by remote processor image,
+ managed by CMA.
+
++ rqshare= [X86] Select the MuQSS scheduler runqueue sharing type.
++ Format: <string>
++ smt -- Share SMT (hyperthread) sibling runqueues
++ mc -- Share MC (multicore) sibling runqueues
++ smp -- Share SMP runqueues
++			none -- Do not share any runqueues
++			Default value is mc.
++
+ rw [KNL] Mount root device read-write on boot
+
+ S [KNL] Run init in single mode
+diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
+new file mode 100644
+index 000000000000..c0282002a079
+--- /dev/null
++++ b/Documentation/scheduler/sched-BFS.txt
+@@ -0,0 +1,351 @@
++BFS - The Brain Fuck Scheduler by Con Kolivas.
++
++Goals.
++
++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
++completely do away with the complex designs of the past for the cpu process
++scheduler and instead implement one that is very simple in basic design.
++The main focus of BFS is to achieve excellent desktop interactivity and
++responsiveness without heuristics and tuning knobs that are difficult to
++understand, impossible to model and predict the effect of, and when tuned to
++one workload cause massive detriment to another.
++
++
++Design summary.
++
++BFS is best described as a single runqueue, O(n) lookup, earliest effective
++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
++deadline first) and my previous Staircase Deadline scheduler. Each component
++shall be described in order to understand the significance of, and reasoning for
++it. The codebase when the first stable version was released was approximately
++9000 lines less code than the existing mainline linux kernel scheduler (in
++2.6.31). This does not even take into account the removal of documentation and
++the cgroups code that is not used.
++
++Design reasoning.
++
++The single runqueue refers to the queued but not running processes for the
++entire system, regardless of the number of CPUs. The reason for going back to
++a single runqueue design is that once multiple runqueues are introduced,
++per-CPU or otherwise, there will be complex interactions as each runqueue will
++be responsible for the scheduling latency and fairness of the tasks only on its
++own runqueue, and to achieve fairness and low latency across multiple CPUs, any
++advantage in throughput of having CPU local tasks causes other disadvantages.
++This is due to requiring a very complex balancing system to at best achieve some
++semblance of fairness across CPUs and can only maintain relatively low latency
++for tasks bound to the same CPUs, not across them. To increase said fairness
++and latency across CPUs, the advantage of local runqueue locking, which makes
++for better scalability, is lost due to having to grab multiple locks.
++
++A significant feature of BFS is that all accounting is done purely based on CPU
++used and nowhere is sleep time used in any way to determine entitlement or
++interactivity. Interactivity "estimators" that use some kind of sleep/run
++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
++tasks that aren't interactive as being so. The reason for this is that it is
++close to impossible to determine that when a task is sleeping, whether it is
++doing it voluntarily, as in a userspace application waiting for input in the
++form of a mouse click or otherwise, or involuntarily, because it is waiting for
++another thread, process, I/O, kernel activity or whatever. Thus, such an
++estimator will introduce corner cases, and more heuristics will be required to
++cope with those corner cases, introducing more corner cases and failed
++interactivity detection and so on. Interactivity in BFS is built into the design
++by virtue of the fact that tasks that are waking up have not used up their quota
++of CPU time, and have earlier effective deadlines, thereby making it very likely
++they will preempt any CPU bound task of equivalent nice level. See below for
++more information on the virtual deadline mechanism. Even if they do not preempt
++a running task, because the rr interval is guaranteed to have a bound upper
++limit on how long a task will wait for, it will be scheduled within a timeframe
++that will not cause visible interface jitter.
++
++
++Design details.
++
++Task insertion.
++
++BFS inserts tasks into each relevant queue as an O(1) insertion into a doubly
++linked list. On insertion, *every* running queue is checked to see if the newly
++queued task can run on any idle queue, or preempt the lowest running task on the
++system. This is how the cross-CPU scheduling of BFS achieves significantly lower
++latency per extra CPU the system has. In this case the lookup is, in the worst
++case scenario, O(n) where n is the number of CPUs on the system.
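++
++Expressed as a minimal sketch using the kernel's standard list primitives
++(illustrative only: the queue[] and prio_bitmap identifiers are hypothetical
++and do not match the actual BFS source), the O(1) insertion amounts to:
++
++	/* Enqueue: constant-time tail append plus marking the priority bitmap. */
++	list_add_tail(&p->run_list, &queue[p->prio]);
++	set_bit(p->prio, prio_bitmap);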
++
++Data protection.
++
++BFS has one single lock protecting the process local data of every task in the
++global queue. Thus every insertion, removal and modification of task data in the
++global runqueue needs to grab the global lock. However, once a task is taken by
++a CPU, the CPU has its own local data copy of the running process' accounting
++information which only that CPU accesses and modifies (such as during a
++timer tick) thus allowing the accounting data to be updated lockless. Once a
++CPU has taken a task to run, it removes it from the global queue. Thus the
++global queue only ever has, at most,
++
++ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
++
++tasks in the global queue. This value is relevant for the time taken to look up
++tasks during scheduling. It will increase if tasks with CPU affinity set in
++their policy to limit which CPUs they're allowed to run on outnumber the
++number of CPUs. The +1 is because when rescheduling a task, the CPU's
++currently running task is put back on the queue. Lookup will be described after
++the virtual deadline mechanism is explained.
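++
++As a concrete illustration of the formula (numbers chosen only as an example):
++on a machine with 8 logical CPUs and 20 tasks requesting CPU time, the global
++queue holds at most 20 - 8 + 1 = 13 tasks, since 8 tasks are running on their
++CPUs and the +1 covers the task being put back when a CPU reschedules.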
++
++Virtual deadline.
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in BFS is entirely in the virtual deadline mechanism. The one
++tunable in BFS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in jiffies by this equation:
++
++ jiffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases. Once a task is descheduled, it is put back on the queue, and an
++O(n) lookup of all queued-but-not-running tasks is done to determine which has
++the earliest deadline and that task is chosen to receive CPU next.
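++
++As a rough worked example (on the natural reading that the nice -20 baseline
++ratio is 1, and using the default rr_interval of 6ms described below): a
++nice -20 task receives a deadline about 6ms ahead of the current jiffies
++value, while a nice 0 task, twenty compounding 10% steps away, has a
++prio_ratio of about 1.1^20 ~= 6.7 and therefore a deadline roughly 40ms out.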
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (jiffies) is
++constantly moving.
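++
++As a worked example: two tasks five nice levels apart differ in prio_ratio by
++a factor of about 1.1^5 ~= 1.61, so their relative CPU share works out to
++approximately 1.61^2 ~= 2.6 to 1 in favour of the higher priority task.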
++
++Task lookup.
++
++BFS has 103 priority queues. 100 of these are dedicated to the static priority
++of realtime tasks, and the remaining 3 are, in order of best to worst priority,
++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
++scheduling). When a task of these priorities is queued, a bitmap of running
++priorities is set showing which of these priorities has tasks waiting for CPU
++time. When a CPU is made to reschedule, the lookup for the next task to get
++CPU time is performed in the following way:
++
++First the bitmap is checked to see what static priority tasks are queued. If
++any realtime priorities are found, the corresponding queue is checked and the
++first task listed there is taken (provided CPU affinity is suitable) and lookup
++is complete. If the priority corresponds to a SCHED_ISO task, they are also
++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
++stage, every task in the runlist that corresponds to that priority is checked
++to see which has the earliest set deadline, and (provided it has suitable CPU
++affinity) it is taken off the runqueue and given the CPU. If a task has an
++expired deadline, it is taken and the rest of the lookup aborted (as they are
++chosen in FIFO order).
++
++Thus, the lookup is O(n) in the worst case only, where n is as described
++earlier, as tasks may be chosen before the whole task list is looked over.
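++
++The lookup described above can be sketched in illustrative C, reusing the
++hypothetical queue[] and prio_bitmap names from the insertion sketch; the
++helper functions are made up for clarity and are not the real BFS code:
++
++	struct task_struct *p, *next = NULL;
++	int prio = find_first_bit(prio_bitmap, 103);
++
++	if (prio < 100 || is_iso_prio(prio)) {
++		/* Realtime and SCHED_ISO queues are taken in FIFO order. */
++		next = first_task_allowed_on(&queue[prio], cpu);
++	} else {
++		/* SCHED_NORMAL/SCHED_IDLEPRIO: O(n) earliest-deadline scan. */
++		list_for_each_entry(p, &queue[prio], run_list) {
++			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++				continue;
++			if (deadline_expired(p)) {	/* expired: take it, stop */
++				next = p;
++				break;
++			}
++			if (!next || deadline_before(p, next))
++				next = p;
++		}
++	}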
++
++
++Scalability.
++
++The major limitations of BFS will be that of scalability, as the separate
++runqueue designs will have less lock contention as the number of CPUs rises.
++However they do not scale linearly even with separate runqueues as multiple
++runqueues will need to be locked concurrently on such designs to be able to
++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
++across CPUs, and to achieve low enough latency for tasks on a busy CPU when
++other CPUs would be more suited. BFS has the advantage that it requires no
++balancing algorithm whatsoever, as balancing occurs by proxy simply because
++all CPUs draw off the global runqueue, in priority and deadline order. Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of cache locality beyond the last running CPU. The sched_domains
++information is used to determine the relative virtual "cache distance" that
++other CPUs have from the last CPU a task was running on. CPUs with shared
++caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
++as cache local. CPUs without shared caches are treated as not cache local, and
++CPUs on different NUMA nodes are treated as very distant. This "relative cache
++distance" is used by modifying the virtual deadline value when doing lookups.
++Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
++"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
++behind the doubling of deadlines is as follows. The real cost of migrating a
++task from one CPU to another is entirely dependent on the cache footprint of
++the task, how cache intensive the task is, how long it's been running on that
++CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
++how layered the CPU cache is, how fast a context switch is... and so on. In
++other words, it's close to random in the real world where we do more than just
++one sole workload. The only thing we can be sure of is that it's not free. So
++BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
++is more important than cache locality, and cache locality only plays a part
++after that. Doubling the effective deadline is based on the premise that the
++"cache local" CPUs will tend to work on the same tasks up to double the number
++of cache local CPUs, and once the workload is beyond that amount, it is likely
++that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
++is a value I pulled out of my arse.
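++
++For example, a queued task whose virtual deadline is 10ms away when examined
++from a "cache local" CPU is treated as if it were 20ms away by a "cache
++distant" CPU and 40ms away by a CPU on another NUMA node, making it
++correspondingly less attractive to pull across the machine.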
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling busyness. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
++
++This shows the SMT or "hyperthread" awareness in the design as well which will
++choose a real idle core first before a logical SMT sibling which already has
++tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage. Note that in terms of scalability, the number of
++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
++results are very promising indeed, without needing to tweak any knobs, features
++or options. Benchmark contributions are most welcome.
++
++
++Features
++
++As the initial prime target audience for BFS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
++support for CGROUPS. The average user should neither need to know what these
++are, nor should they need to be using them to have good desktop behaviour.
++
++rr_interval
++
++There is only one "scheduler" tunable, the round robin interval. This can be
++accessed in
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6 on a
++uniprocessor machine, and automatically set to a progressively higher value on
++multiprocessor machines. The reasoning behind increasing the value on more CPUs
++is that the effective latency is decreased by virtue of there being more CPUs on
++BFS (for reasons explained above), and increasing the value allows for less
++cache contention and more throughput. Valid values are from 1 to 1000.
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by HZ resolution
++of the kernel configuration. Thus, the worst case latencies are usually slightly
++higher than this actual value. The default value of 6 is not an arbitrary one.
++It is based on the fact that humans can detect jitter at approximately 7ms, so
++aiming for much lower latencies is pointless under most circumstances. It is
++worth noting this fact when comparing the latency performance of BFS to other
++schedulers. Worst case latencies being higher than 7ms are far worse than
++average latencies not being in the microsecond range.
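++
++For example, to read the current value and then halve the default of 6,
++trading some throughput for lower latency (run as root; purely illustrative):
++
++	cat /proc/sys/kernel/rr_interval
++	echo 3 > /proc/sys/kernel/rr_interval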
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of _total CPU_ available across the machine, configurable
++as a percentage in the following "resource handling" tunable (as opposed to a
++scheduler tunable):
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
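++
++As a worked example of the total-CPU accounting: on a 4 CPU machine the
++default 70% cap corresponds to 2.8 CPUs' worth of time over the rolling 5
++second average, so a single ISO task saturating one CPU consumes only 25% of
++the total and can remain pseudo-realtime indefinitely, as noted above.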
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To prevent this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
++per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
++be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use. BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage. When comparing throughput of BFS
++to other designs, it is important to compare the actual completed work in terms
++of total wall clock time taken and total work done, rather than the reported
++"cpu usage".
++
++
++Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
+diff --git a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
+new file mode 100644
+index 000000000000..ae28b85c9995
+--- /dev/null
++++ b/Documentation/scheduler/sched-MuQSS.txt
+@@ -0,0 +1,373 @@
++MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
++
++MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
++one 8 level skiplist per runqueue, and fine grained locking for much more
++scalability.
++
++
++Goals.
++
++The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
++here on (pronounced mux) is to completely do away with the complex designs of
++the past for the cpu process scheduler and instead implement one that is very
++simple in basic design. The main focus of MuQSS is to achieve excellent desktop
++interactivity and responsiveness without heuristics and tuning knobs that are
++difficult to understand, impossible to model and predict the effect of, and when
++tuned to one workload cause massive detriment to another, while still being
++scalable to many CPUs and processes.
++
++
++Design summary.
++
++MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
++lookup, earliest effective virtual deadline first tickless design, loosely based
++on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
++Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
++Each component shall be described in order to understand the significance of,
++and reasoning for it.
++
++
++Design reasoning.
++
++In BFS, the use of a single runqueue across all CPUs meant that each CPU would
++need to scan the entire runqueue looking for the process with the earliest
++deadline and schedule that next, regardless of which CPU it originally came
++from. This made BFS deterministic with respect to latency and provided
++guaranteed latencies dependent on number of processes and CPUs. The single
++runqueue, however, meant that all CPUs would compete for the single lock
++protecting it, which would lead to increasing lock contention as the number of
++CPUs rose and appeared to limit scalability of common workloads beyond 16
++logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
++increased overhead proportionate to the number of queued processes and led to
++cache thrashing while iterating over the linked list.
++
++MuQSS is an evolution of BFS, designed to maintain the same scheduling
++decision mechanism and be virtually deterministic without relying on the
++constrained design of the single runqueue, by splitting the runqueue out
++per-CPU and using skip lists instead of linked lists.
++
++The original reason for going back to a single runqueue design for BFS was
++that once multiple runqueues are introduced, per-CPU or otherwise, complex
++interactions arise: each runqueue is responsible for the scheduling latency
++and fairness only of the tasks on its own runqueue, so any throughput
++advantage from keeping tasks CPU-local brings disadvantages elsewhere.
++Achieving even a semblance of fairness across CPUs requires a very complex
++balancing system, which can only maintain relatively low latency for tasks
++bound to the same CPUs, not across them. To improve fairness and latency
++across CPUs, the advantage of local runqueue locking, which makes for better
++scalability, is lost due to having to grab multiple locks.
++
++MuQSS works around the problems inherent in multiple runqueue designs by
++making its skip lists priority ordered and through novel use of lockless
++examination of each other runqueue it can decide if it should take the earliest
++deadline task from another runqueue for latency reasons, or for CPU balancing
++reasons. It still does not have a balancing system, instead letting balancing
++emerge from its next-task scheduling decisions and its choice of which CPU to
++wake tasks on.
++
++As a further evolution of the design, MuQSS normally shares runqueues between
++logical CPUs that already share CPU resources, improving latency and
++throughput. By default it shares runqueues and locks between multicore
++siblings. Optionally it can be configured to share between SMT siblings only,
++between all SMP packages, or not at all. The sharing level can also be
++selected at boot time.
++
++
++Design details.
++
++Custom skip list implementation:
++
++To avoid the overhead of building up and tearing down skip list structures,
++the variant used by MuQSS has a number of optimisations making it specific for
++its use case in the scheduler. It uses static arrays of 8 'levels' instead of
++building up and tearing down structures dynamically. This makes each runqueue
++only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
++it means that it scales O(log N) up to 64k x number of logical CPUs which is
++far beyond the realistic task limits each CPU could handle. By being 8 levels
++it also makes the array exactly one cacheline in size. Additionally, each
++skip list node is bidirectional making insertion and removal amortised O(1),
++being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
++first entry in each list at all times with MuQSS, so there is never a need to
++do a search and thus lookup is always O(1). In interactive mode, the queues
++will be searched beyond their first entry if the first task is not suitable
++for affinity or SMT nice reasons.
++
++Task insertion:
++
++MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
++a custom skip list as described above (based on the original design by William
++Pugh). Insertion is ordered in such a way that there is never a need to do a
++search by ordering tasks according to static priority primarily, and then
++virtual deadline at the time of insertion.
++
++Niffies:
++
++Niffies are a monotonic forward moving counter not unlike the "jiffies" but are
++of nanosecond resolution. Niffies are calculated per-runqueue from the high
++resolution TSC timers, and in order to maintain fairness are synchronised
++between CPUs whenever both runqueues are locked concurrently.
++
++Virtual deadline:
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in MuQSS is entirely in the virtual deadline mechanism. The one
++tunable in MuQSS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in niffies by this equation:
++
++ niffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline is reset according to the formula above. Second is sleep, where a
++task is no longer requesting CPU for whatever reason. The time_slice and
++deadline are _not_ adjusted in this case, simply carried over for when the
++task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases.
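++
++As a rough worked sketch (assuming a baseline prio_ratio of 1.0 at nice -20
++compounding by 10% per nice level; the in-kernel value is scaled in integer
++units, so these numbers are illustrative only), with the default rr_interval
++of 6ms:
++
++ nice -20: deadline = niffies + (1.0    * 6ms)  = niffies + 6ms
++ nice   0: deadline = niffies + (1.1^20 * 6ms) ~= niffies + 40ms
++ nice  19: deadline = niffies + (1.1^39 * 6ms) ~= niffies + 247ms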
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (niffies) is
++constantly moving.
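++
++As an illustration under the same assumptions as the sketch above: between
++nice 0 and nice 19 the prio_ratio differs by a factor of 1.1^19 ~= 6.1, so
++the CPU distribution works out to roughly 6.1^2 ~= 37, i.e. about 37:1 in
++favour of the nice 0 task.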
++
++Task lookup:
++
++As tasks are already pre-ordered according to anticipated scheduling order in
++the skip lists, lookup for the next suitable task per-runqueue is always a
++matter of simply selecting the first task in the 0th level skip list entry.
++In order to maintain optimal latency and fairness across CPUs, MuQSS does a
++novel examination of every other runqueue in cache locality order, choosing the
++best task across all runqueues. This provides near-determinism of how long any
++task across the entire system may wait before receiving CPU time. The other
++runqueues are first examined locklessly and then trylocked to minimise the
++potential lock contention if they are likely to have a suitable better task.
++Each other runqueue lock is only held for as long as it takes to examine the
++entry for suitability. In "interactive" mode, the default setting, MuQSS will
++look for the best deadline task across all CPUs, while in !interactive mode,
++it will only select a better deadline task from another CPU if it is more
++heavily laden than the current one.
++
++Lookup is therefore O(k) where k is the number of CPUs.
++
++
++Latency.
++
++Through the use of virtual deadlines to govern the scheduling order of normal
++tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
++the rr_interval tunable which is set to 6ms by default. This means that the
++longest a CPU bound task will wait for more CPU is proportional to the number
++of running tasks and in the common case of 0-2 running tasks per CPU, will be
++under the 7ms threshold for human perception of jitter. Additionally, as newly
++woken tasks will have an early deadline from their previous runtime, the very
++tasks that are usually latency sensitive will have the shortest interval for
++activation, usually preempting any existing CPU bound tasks.
++
++Tickless expiry:
++
++A feature of MuQSS is that it is not tied to the resolution of the chosen tick
++rate in Hz, instead depending entirely on the high resolution timers where
++possible for sub-millisecond accuracy on timeouts regardless of the underlying
++tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
++such as 100 by default, benefiting from the improved throughput and lower
++power usage it provides. Another advantage of this approach is that in
++combination with the Full No HZ option, which disables ticks on running task
++CPUs instead of just idle CPUs, the tick can be disabled at all times
++regardless of how many tasks are running instead of being limited to just one
++running task. Note that this option is NOT recommended for regular desktop
++users.
++
++
++Scalability and balancing.
++
++Unlike traditional approaches where balancing is a combination of CPU selection
++at task wakeup and intermittent balancing based on a vast array of rules set
++according to architecture, busyness calculations and special case management,
++MuQSS indirectly balances on the fly at task wakeup and next task selection.
++During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
++each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
++Additionally it selects any idle CPUs, if they are available, at any time over
++busy CPUs according to the following preference:
++
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++
++Mux is therefore SMT, MC and NUMA aware without the need for extra
++intermittent balancing to keep CPUs busy and make the most of cache
++coherency.
++
++
++Features
++
++As the initial prime target audience for MuQSS was the average desktop user,
++it was designed to deliver its benefits without tweaking, tuning, or feature
++configuration. Thus the number of knobs and features has been kept to an
++absolute minimum and should not require extra user input for the vast
++majority of cases. There are 3 optional tunables and 2 extra scheduling
++policies: the rr_interval, interactive, and iso_cpu tunables, and the
++SCHED_ISO and SCHED_IDLEPRIO policies. In addition to this, MuQSS also uses
++sub-tick accounting. What MuQSS does _not_ now feature is support for
++CGROUPS. The average user should neither need to know what these are, nor
++need to use them to have good desktop behaviour. However, since some
++applications refuse to work without cgroups, they can be enabled with MuQSS
++as a stub: the filesystem will be created, allowing such applications to
++work.
++
++rr_interval:
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6. Valid values
++are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
++decreasing throughput, while increasing it will improve throughput, but at the
++cost of worsening latencies. It is based on the fact that humans can detect
++jitter at approximately 7ms, so aiming for much lower latencies is pointless
++under most circumstances. It is worth noting this fact when comparing the
++latency performance of MuQSS to other schedulers. Worst case latencies being
++higher than 7ms are far worse than average latencies not being in the
++microsecond range.
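++
++A minimal shell sketch of inspecting and changing the tunable (assuming a
++kernel built with MuQSS so that this /proc entry exists; writing requires
++root):
++
++ cat /proc/sys/kernel/rr_interval
++ echo 2 > /proc/sys/kernel/rr_interval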
++
++interactive:
++
++ /proc/sys/kernel/interactive
++
++The value is a simple boolean of 1 for on and 0 for off and is set to on by
++default. Disabling this will disable the near-determinism of MuQSS when
++selecting the next task by not examining all CPUs for the earliest deadline
++task, or all CPUs when choosing where to wake a task, instead prioritising
++CPU balancing for improved throughput. Latency will still be bound by
++rr_interval, but on a per-CPU basis instead of across the whole system.
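++
++For example, to trade the near-determinism for throughput on a batch
++workload (a sketch; requires root):
++
++ echo 0 > /proc/sys/kernel/interactive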
++
++Runqueue sharing.
++
++By default MuQSS chooses to share runqueue resources (specifically the skip
++list and locking) between multicore siblings. It is configurable at build time
++to select between None, SMT, MC and SMP, corresponding to no sharing, sharing
++only between simultaneous multithreading siblings, multicore siblings, or
++symmetric multiprocessing physical packages. Additionally it can be set at
++boot time with the rqshare parameter. The reason for configurability
++is that some architectures have CPUs with many multicore siblings (>= 16)
++where it may be detrimental to throughput to share runqueues and another
++sharing option may be desirable. Additionally, more sharing than usual can
++improve latency on a system-wide level at the expense of throughput if desired.
++
++The options are:
++none, smt, mc, smp
++
++eg:
++ rqshare=mc
++
++Isochronous scheduling:
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of CPU available per CPU, configurable as a percentage in
++the following "resource handling" tunable (as opposed to a scheduler tunable):
++
++iso_cpu:
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second
++average. Because it is the total CPU available, on a multi CPU machine it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
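++
++For example, to allow ISO tasks a larger share of CPU before demotion (a
++sketch; requires root):
++
++ echo 80 > /proc/sys/kernel/iso_cpu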
++
++A feature of MuQSS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
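++
++For example, an unprivileged invocation such as the following (a hypothetical
++command line; the fallback to SCHED_ISO is performed by MuQSS, not by chrt
++itself) would end up running as SCHED_ISO rather than failing:
++
++ chrt -r 1 ./encoder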
++
++
++Idleprio scheduling:
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start a
++video encode or so on without any slowdown of other tasks. To prevent this
++policy from grabbing shared resources and holding them indefinitely, if the
++scheduler detects a state where the task is waiting on I/O, the machine is
++about to suspend to RAM and so on, it will transiently schedule the task as
++SCHED_NORMAL. Once a task has
++been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
++superuser privileges since it is effectively a lower scheduling policy. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting:
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the timer
++tick frequency (HZ) is lowered. It is possible to create an application which
++uses almost 100% CPU, yet by being descheduled at the right time, records zero
++CPU usage. While the main problem with this is that there are possible security
++implications, it is also difficult to determine how much CPU a task really does
++use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
++usage. Thus, the amount of CPU reported as being used by MuQSS will more
++accurately represent how much CPU the task itself is using (as is shown for
++example by the 'time' application), so the reported values may be quite
++different to other schedulers. When comparing throughput of MuQSS to other
++designs, it is important to compare the actual completed work in terms of total
++wall clock time taken and total work done, rather than the reported "cpu usage".
++
++Symmetric MultiThreading (SMT) aware nice:
++
++SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
++logical CPU count rises by adding thread units to each CPU core, allowing more
++than one task to be run simultaneously on the same core, the disadvantage of it
++is that the CPU power is shared between the tasks, not summating to the power
++of two CPUs. The practical upshot of this is that two tasks running on
++separate threads of the same core run significantly slower than if they had one
++core each to run on. While smart CPU selection allows each task to have a core
++to itself whenever available (as is done on MuQSS), it cannot offset the
++slowdown that occurs when the cores are all loaded and only a hyperthread is left.
++Most of the time this is harmless as the CPU is effectively overloaded at this
++point and the extra thread is of benefit. However when running a niced task in
++the presence of an un-niced task (say nice 19 v nice 0), the niced task gets
++precisely the same amount of CPU power as the un-niced one. MuQSS has an
++optional configuration feature known as SMT-NICE which selectively idles the
++secondary niced thread for a period proportional to the nice difference,
++allowing CPU distribution according to nice level to be maintained, at the
++expense of a small amount of extra overhead. If this is configured in on a
++machine without SMT threads, the overhead is minimal.
++
++
++Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 412314eebda6..eeb966fc70a8 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -40,6 +40,7 @@ show up in /proc/sys/kernel:
+ - hung_task_timeout_secs
+ - hung_task_warnings
+ - kexec_load_disabled
++- iso_cpu
+ - kptr_restrict
+ - l2cr [ PPC only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+@@ -74,6 +75,7 @@ show up in /proc/sys/kernel:
+ - randomize_va_space
+ - real-root-dev ==> Documentation/admin-guide/initrd.rst
+ - reboot-cmd [ SPARC only ]
++- rr_interval
+ - rtsig-max
+ - rtsig-nr
+ - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
+@@ -95,6 +97,7 @@ show up in /proc/sys/kernel:
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
++- yield_type
+ - version
+
+ ==============================================================
+@@ -411,6 +414,16 @@ When kptr_restrict is set to (2), kernel pointers printed using
+
+ ==============================================================
+
++iso_cpu: (MuQSS CPU scheduler only).
++
++This sets the percentage of cpu time that unprivileged SCHED_ISO tasks
++can run at, effectively at realtime priority, averaged over a rolling
++five seconds across the -whole- system, meaning all cpus.
++
++Set to 70 (percent) by default.
++
++==============================================================
++
+ l2cr: (PPC only)
+
+ This flag controls the L2 cache of G3 processor boards. If
+@@ -837,6 +850,20 @@ rebooting. ???
+
+ ==============================================================
+
++rr_interval: (MuQSS CPU scheduler only)
++
++This is the smallest duration that any cpu process scheduling unit
++will run for. Increasing this value can increase throughput of cpu
++bound tasks substantially but at the expense of increased latencies
++overall. Conversely decreasing it will decrease average and maximum
++latencies but at the expense of throughput. This value is in
++milliseconds and the default value chosen depends on the number of
++cpus available at scheduler initialisation with a minimum of 6.
++
++Valid values are from 1-1000.
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+
+ The file rtsig-max can be used to tune the maximum number
+@@ -1075,3 +1102,13 @@ The softlockup threshold is (2 * watchdog_thresh). Setting this
+ tunable to zero will disable lockup detection altogether.
+
+ ==============================================================
++
++yield_type: (MuQSS CPU scheduler only)
++
++This determines what type of yield calls to sched_yield will perform.
++
++ 0: No yield.
++ 1: Yield only to better priority/deadline tasks. (default)
++ 2: Expire timeslice and recalculate deadline.
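++
++Example (assuming this MuQSS sysctl is present; requires root):
++
++ echo 2 > /proc/sys/kernel/yield_type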
++
++==============================================================
+diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
+index 1aca2e8fd1ba..f892be336781 100644
+--- a/arch/arc/configs/tb10x_defconfig
++++ b/arch/arc/configs/tb10x_defconfig
+@@ -28,7 +28,7 @@ CONFIG_ARC_PLAT_TB10X=y
+ CONFIG_ARC_CACHE_LINE_SHIFT=5
+ CONFIG_HZ=250
+ CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_COMPACTION is not set
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
+index 43dab4890ad3..44a52166ca5e 100644
+--- a/arch/arm/configs/bcm2835_defconfig
++++ b/arch/arm/configs/bcm2835_defconfig
+@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_ARCH_MULTI_V6=y
+ CONFIG_ARCH_BCM=y
+ CONFIG_ARCH_BCM2835=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_KSM=y
+ CONFIG_CLEANCACHE=y
+diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
+index 4cb9829fccd1..8801d9ce3150 100644
+--- a/arch/arm/configs/imx_v6_v7_defconfig
++++ b/arch/arm/configs/imx_v6_v7_defconfig
+@@ -47,7 +47,7 @@ CONFIG_PCI_MSI=y
+ CONFIG_PCI_IMX6=y
+ CONFIG_SMP=y
+ CONFIG_ARM_PSCI=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
+ CONFIG_CMA=y
+diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
+index 0bcdec7cc169..10ceaefa51e0 100644
+--- a/arch/arm/configs/mps2_defconfig
++++ b/arch/arm/configs/mps2_defconfig
+@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y
+ CONFIG_SET_MEM_PARAM=y
+ CONFIG_DRAM_BASE=0x21000000
+ CONFIG_DRAM_SIZE=0x1000000
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_ATAGS is not set
+ CONFIG_ZBOOT_ROM_TEXT=0x0
+ CONFIG_ZBOOT_ROM_BSS=0x0
+diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
+index bbfb6759447b..1a5b9e61c2c8 100644
+--- a/arch/arm/configs/mxs_defconfig
++++ b/arch/arm/configs/mxs_defconfig
+@@ -27,7 +27,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
+ # CONFIG_ARCH_MULTI_V7 is not set
+ CONFIG_ARCH_MXS=y
+ # CONFIG_ARM_THUMB is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+index 99c00d835f47..39b91dfa55b5 100644
+--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_IRQ_TIMER0=12
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
+index e66ba31ef84d..675cadb3a0c4 100644
+--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF526=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BFIN526_EZBRD=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+index 0207c588c19f..4c517c443af5 100644
+--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_EZKIT_V2=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
+index 99c131ba7d90..bf8df3e6cf02 100644
+--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_USB_INT0=11
+diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+index cdeb51856f26..0220b3b15c53 100644
+--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
++++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_TLL6527M=y
+diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
+index ed7d2c096739..6023e3fd2c48 100644
+--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BFIN533_EZKIT=y
+ CONFIG_TIMER0=11
+ CONFIG_CLKIN_HZ=27000000
+diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
+index 0c241f4d28d7..f5cd0f18b711 100644
+--- a/arch/blackfin/configs/BF533-STAMP_defconfig
++++ b/arch/blackfin/configs/BF533-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TIMER0=11
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
+index e5360b30e39a..48085fde7f9e 100644
+--- a/arch/blackfin/configs/BF537-STAMP_defconfig
++++ b/arch/blackfin/configs/BF537-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
+index 60f6fb86125c..12deeaaef3cb 100644
+--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF538=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_IRQ_TIMER1=12
+diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
+index 38cb17d218d4..6a68ffc55b5a 100644
+--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF548_std=y
+ CONFIG_IRQ_TIMER0=11
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
+index 78f6bc79f910..e9f3ba783a4e 100644
+--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
++++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+index fac8bb578249..89b75a6c3fab 100644
+--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_SMP=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
+index 2a2e4d0cebc1..67b3d2f419ba 100644
+--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_IRQ_TIMER0=10
+ CONFIG_CLKIN_HZ=30000000
+diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
+index 3ce77f07208a..8cc75d4218fb 100644
+--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF609=y
+ CONFIG_PINT1_ASSIGN=0x01010000
+ CONFIG_PINT2_ASSIGN=0x07000101
+diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
+index f4a9200e1ab1..9faf0ec7007f 100644
+--- a/arch/blackfin/configs/BlackStamp_defconfig
++++ b/arch/blackfin/configs/BlackStamp_defconfig
+@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF532=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_BLACKSTAMP=y
+diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
+index 1902bb05d086..4a1ad4fd7bb2 100644
+--- a/arch/blackfin/configs/CM-BF527_defconfig
++++ b/arch/blackfin/configs/CM-BF527_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_TIMER0=12
+diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
+index c7926812971c..9d787e28bbe8 100644
+--- a/arch/blackfin/configs/PNAV-10_defconfig
++++ b/arch/blackfin/configs/PNAV-10_defconfig
+@@ -15,7 +15,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_PNAV10=y
+diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
+index 23fdc57d657a..225df32dc9a8 100644
+--- a/arch/blackfin/configs/SRV1_defconfig
++++ b/arch/blackfin/configs/SRV1_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BOOT_LOAD=0x400000
+diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
+index e28959479fe0..425c24e43c34 100644
+--- a/arch/blackfin/configs/TCM-BF518_defconfig
++++ b/arch/blackfin/configs/TCM-BF518_defconfig
+@@ -23,7 +23,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_BFIN518F_TCM=y
+diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
+index 499f51498ecb..f7cb39b0662c 100644
+--- a/arch/mips/configs/fuloong2e_defconfig
++++ b/arch/mips/configs/fuloong2e_defconfig
+@@ -2,7 +2,8 @@ CONFIG_MACH_LOONGSON64=y
+ CONFIG_64BIT=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_LOCALVERSION="-fuloong2e"
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
+index 55438fc9991e..db03ef4f737d 100644
+--- a/arch/mips/configs/gpr_defconfig
++++ b/arch/mips/configs/gpr_defconfig
+@@ -1,7 +1,8 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_GPR=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
+index 7ddfb4ef9479..93e439ad3fce 100644
+--- a/arch/mips/configs/ip22_defconfig
++++ b/arch/mips/configs/ip22_defconfig
+@@ -4,7 +4,8 @@ CONFIG_CPU_R5000=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
+index d0a4c2cfacf8..6f0600e99c25 100644
+--- a/arch/mips/configs/ip28_defconfig
++++ b/arch/mips/configs/ip28_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_SGI_IP28=y
+ CONFIG_ARC_CONSOLE=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
+index 9ad1c94376c8..1d62ce7ff5dc 100644
+--- a/arch/mips/configs/jazz_defconfig
++++ b/arch/mips/configs/jazz_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MACH_JAZZ=y
+ CONFIG_OLIVETTI_M700=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
+index c3d0d0a6e044..aa3426d5f7d7 100644
+--- a/arch/mips/configs/mtx1_defconfig
++++ b/arch/mips/configs/mtx1_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_MTX1=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
+index c4477a4d40c1..95caf0af665f 100644
+--- a/arch/mips/configs/nlm_xlr_defconfig
++++ b/arch/mips/configs/nlm_xlr_defconfig
+@@ -5,7 +5,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+ CONFIG_SMP=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_KEXEC=y
+ CONFIG_CROSS_COMPILE=""
+ # CONFIG_LOCALVERSION_AUTO is not set
+diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
+index 41190c2036e6..3728897ab2b2 100644
+--- a/arch/mips/configs/pic32mzda_defconfig
++++ b/arch/mips/configs/pic32mzda_defconfig
+@@ -1,7 +1,7 @@
+ CONFIG_MACH_PIC32=y
+ CONFIG_DTB_PIC32_MZDA_SK=y
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
+index b22a3cf149b6..cfffca3d37f4 100644
+--- a/arch/mips/configs/pistachio_defconfig
++++ b/arch/mips/configs/pistachio_defconfig
+@@ -5,7 +5,7 @@ CONFIG_MIPS_CPS=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+ CONFIG_ZSMALLOC=y
+ CONFIG_NR_CPUS=4
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_DEFAULT_HOSTNAME="localhost"
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
+index e73cdb08fc6e..dc62fa8d6065 100644
+--- a/arch/mips/configs/pnx8335_stb225_defconfig
++++ b/arch/mips/configs/pnx8335_stb225_defconfig
+@@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_128=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ # CONFIG_LOCALVERSION_AUTO is not set
+ # CONFIG_SWAP is not set
+diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
+index 5f71aa598b06..767f1999ead0 100644
+--- a/arch/mips/configs/rm200_defconfig
++++ b/arch/mips/configs/rm200_defconfig
+@@ -2,7 +2,8 @@ CONFIG_SNI_RM=y
+ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_ARC_CONSOLE=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
+index ccc109761f44..a6a5b0b7a9c9 100644
+--- a/arch/parisc/configs/712_defconfig
++++ b/arch/parisc/configs/712_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_GSC_LASI=y
+ # CONFIG_PDC_CHASSIS is not set
+ CONFIG_BINFMT_MISC=m
+diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
+index 8d41a73bd71b..b8e0a6662ff9 100644
+--- a/arch/parisc/configs/c3000_defconfig
++++ b/arch/parisc/configs/c3000_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA8X00=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_GSC is not set
+ CONFIG_PCI=y
+ CONFIG_PCI_LBA=y
+diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
+index 52c9050a7c5c..8d86d2e989f4 100644
+--- a/arch/parisc/configs/default_defconfig
++++ b/arch/parisc/configs/default_defconfig
+@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IOMMU_CCIO=y
+ CONFIG_GSC_LASI=y
+ CONFIG_GSC_WAX=y
+diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
+index 4bb832a41d55..418d58938a75 100644
+--- a/arch/powerpc/configs/c2k_defconfig
++++ b/arch/powerpc/configs/c2k_defconfig
+@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+ CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+ CONFIG_GEN_RTC=y
+ CONFIG_HIGHMEM=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_PM=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
+index 7ee736f20774..8663c0043a56 100644
+--- a/arch/powerpc/configs/ppc6xx_defconfig
++++ b/arch/powerpc/configs/ppc6xx_defconfig
+@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y
+ CONFIG_MCU_MPC8349EMITX=y
+ CONFIG_HIGHMEM=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_HIBERNATION=y
+ CONFIG_PM_DEBUG=y
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+index 9033c8194eda..a7437387a8de 100644
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -64,11 +64,6 @@ static struct task_struct *spusched_task;
+ static struct timer_list spusched_timer;
+ static struct timer_list spuloadavg_timer;
+
+-/*
+- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+- */
+-#define NORMAL_PRIO 120
+-
+ /*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
+index b2d8802f43b4..46434ca1fa10 100644
+--- a/arch/score/configs/spct6600_defconfig
++++ b/arch/score/configs/spct6600_defconfig
+@@ -1,5 +1,5 @@
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
+index 5a1097641247..eb5fbf554e7f 100644
+--- a/arch/sh/configs/se7712_defconfig
++++ b/arch/sh/configs/se7712_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=66666666
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
+index 9c0ef13bee10..cbaa65c8bf9e 100644
+--- a/arch/sh/configs/se7721_defconfig
++++ b/arch/sh/configs/se7721_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_7721_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=33333333
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
+index ceb48e9b70f4..1a69eda6610c 100644
+--- a/arch/sh/configs/titan_defconfig
++++ b/arch/sh/configs/titan_defconfig
+@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
+ CONFIG_SH_PCLK_FREQ=30000000
+ CONFIG_SH_DMA=y
+ CONFIG_SH_DMA_API=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
+ CONFIG_PCI=y
+diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
+index 4d4e1cc6402f..04bea1d28ba7 100644
+--- a/arch/sparc/configs/sparc64_defconfig
++++ b/arch/sparc/configs/sparc64_defconfig
+@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NUMA=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_SUN_LDOMS=y
+ CONFIG_PCI=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
+index 357a4c271ad4..939c63ba7e6e 100644
+--- a/arch/tile/configs/tilegx_defconfig
++++ b/arch/tile/configs/tilegx_defconfig
+@@ -47,7 +47,7 @@ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_NR_CPUS=100
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TILE_PCI_IO=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
+index da2858755fa1..e8c4003cbd81 100644
+--- a/arch/tile/configs/tilepro_defconfig
++++ b/arch/tile/configs/tilepro_defconfig
+@@ -44,7 +44,7 @@ CONFIG_KARMA_PARTITION=y
+ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+ CONFIG_BINFMT_MISC=y
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0fa71a78ec99..80d620757a09 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1016,10 +1016,26 @@ config SCHED_SMT
+ depends on SMP
+ ---help---
+ SMT scheduler support improves the CPU scheduler's decision making
+- when dealing with Intel Pentium 4 chips with HyperThreading at a
++ when dealing with Intel P4/Core 2 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
++config SMT_NICE
++ bool "SMT (Hyperthreading) aware nice priority and policy support"
++ depends on SCHED_MUQSS && SCHED_SMT
++ default y
++ ---help---
++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
++ of the use of 'nice' levels and different scheduling policies
++ (e.g. realtime) due to sharing of CPU power between hyperthreads.
++ SMT nice support makes each logical CPU aware of what is running on
++ its hyperthread siblings, maintaining appropriate distribution of
++ CPU according to nice levels and scheduling policies at the expense
++ of slightly increased overhead.
++
++ If unsure say Y here.
++
+ config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+@@ -1050,6 +1066,79 @@ config SCHED_MC_PRIO
+
+ If unsure say Y here.
+
++choice
++ prompt "CPU scheduler runqueue sharing"
++ default RQ_MC if SCHED_MUQSS
++ default RQ_NONE
++
++config RQ_NONE
++ bool "No sharing"
++ help
++ This is the default behaviour where the CPU scheduler has one runqueue
++ per CPU, whether it is a physical or logical CPU (hyperthread).
++
++ This can still be selected at boot time with the parameter
++ rqshare=none
++
++ If unsure, say N.
++
++config RQ_SMT
++ bool "SMT (hyperthread) siblings"
++ depends on SCHED_SMT && SCHED_MUQSS
++ help
++ With this option enabled, the CPU scheduler will have one runqueue
++ shared by SMT (hyperthread) siblings. As these logical cores share
++ one physical core, sharing the runqueue resource can lead to decreased
++ overhead, lower latency and higher throughput.
++
++ This can still be selected at boot time with the parameter
++ rqshare=smt
++
++ If unsure, say N.
++
++config RQ_MC
++ bool "Multicore siblings"
++ depends on SCHED_MC && SCHED_MUQSS
++ help
++ With this option enabled, the CPU scheduler will have one runqueue
++ shared by multicore siblings in addition to any SMT siblings.
++ As these physical cores share caches, sharing the runqueue resource
++ will lead to lower latency, but its effects on overhead and throughput
++ are less predictable. As a general rule, 6 or fewer cores will likely
++ benefit from this, while larger CPUs will only derive a latency
++ benefit. If your workloads are primarily single threaded, this will
++ possibly worsen throughput. If you are only concerned about latency
++ then enable this regardless of how many cores you have.
++
++ This can still be selected at boot time with the parameter
++ rqshare=mc
++
++ If unsure, say Y.
++
++config RQ_SMP
++ bool "Symmetric Multi-Processing"
++ depends on SMP && SCHED_MUQSS
++ help
++ With this option enabled, the CPU scheduler will have one runqueue
++ shared by all physical CPUs unless they are on separate NUMA nodes.
++ As physical CPUs usually do not share resources, sharing the runqueue
++ will normally worsen throughput but improve latency. If you only
++ care about latency enable this.
++
++ This can still be selected at boot time with the parameter
++ rqshare=smp
++
++ If unsure, say N.
++endchoice
++
++config SHARERQ
++ int
++ default 0 if RQ_NONE
++ default 1 if RQ_SMT
++ default 2 if RQ_MC
++ default 3 if RQ_SMP
++
+ source "kernel/Kconfig.preempt"
+
+ config UP_LATE_INIT
+@@ -1414,7 +1503,7 @@ config HIGHMEM64G
+ endchoice
+
+ choice
+- prompt "Memory split" if EXPERT
++ prompt "Memory split"
+ default VMSPLIT_3G
+ depends on X86_32
+ ---help---
+@@ -1434,17 +1523,17 @@ choice
+ option alone!
+
+ config VMSPLIT_3G
+- bool "3G/1G user/kernel split"
++ bool "Default 896MB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_3G_OPT
+ depends on !X86_PAE
+- bool "3G/1G user/kernel split (for full 1G low memory)"
++ bool "1GB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_2G
+- bool "2G/2G user/kernel split"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_2G_OPT
+ depends on !X86_PAE
+- bool "2G/2G user/kernel split (for full 2G low memory)"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_1G
+- bool "1G/3G user/kernel split"
++ bool "3GB lowmem (1G/3G user/kernel split)"
+ endchoice
+
+ config PAGE_OFFSET
+diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
+index 0eb9f92f3717..e5890ae917e5 100644
+--- a/arch/x86/configs/i386_defconfig
++++ b/arch/x86/configs/i386_defconfig
+@@ -41,7 +41,7 @@ CONFIG_SMP=y
+ CONFIG_X86_GENERIC=y
+ CONFIG_HPET_TIMER=y
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_X86_REBOOTFIXUPS=y
+diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
+index e32fc1f274d8..4368ba4f7967 100644
+--- a/arch/x86/configs/x86_64_defconfig
++++ b/arch/x86/configs/x86_64_defconfig
+@@ -40,7 +40,7 @@ CONFIG_SMP=y
+ CONFIG_CALGARY_IOMMU=y
+ CONFIG_NR_CPUS=64
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_MICROCODE=y
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index a4a8914bf7a4..2d9be91e8e87 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -82,7 +82,7 @@ config MQ_IOSCHED_KYBER
+
+ config IOSCHED_BFQ
+ tristate "BFQ I/O scheduler"
+- default n
++ default y
+ ---help---
+ BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
+ of the device among all processes according to their weights,
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index 64e066eba72e..257375576963 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base,
+ if (swim_readbit(base, MOTOR_ON))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ } else if (action == OFF) {
+ swim_action(base, MOTOR_OFF);
+@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base)
+ if (!swim_readbit(base, DISK_IN))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ swim_select(base, RELAX);
+ }
+@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base)
+ for (wait = 0; wait < HZ; wait++) {
+
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ swim_select(base, RELAX);
+ if (!swim_readbit(base, STEP))
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 05ec530b8a3a..309c62ef80d1 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -879,7 +879,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+ * then host can communicate with new baudrate to controller
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
++ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ return 0;
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index e0b0d7e2d976..a821ab81e742 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3531,7 +3531,7 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ /* No need for locks, the interface is down. */
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index f929e72bdac8..7996500be12e 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1254,7 +1254,7 @@ static int ssif_remove(struct i2c_client *client)
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
+index 5918ea7499bb..5228e78df804 100644
+--- a/drivers/char/snsc.c
++++ b/drivers/char/snsc.c
+@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+ add_wait_queue(&sd->sd_rq, &wait);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_rq, &wait);
+ if (signal_pending(current)) {
+@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
+ add_wait_queue(&sd->sd_wq, &wait);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_wq, &wait);
+ if (signal_pending(current)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index a1c68e6a689e..63b3bae20cd1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index b9239ba067c4..e005ba0d4971 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -202,7 +202,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ break;
+ }
+ if (lazy)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 9545a346044f..c24cf1302ec7 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev,
+ prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+ }
+
+- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
+ if (leftover)
+ return 0;
+
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index 6bbb0b1e6032..f4b83648c405 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
+ default:
+ delay = 402;
+ }
+- /*
+- * TODO: Make sure that we wait at least required delay but why we
+- * have to extend it one tick more?
+- */
+- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
++ schedule_msec_hrtimeout_interruptible(delay + 1);
+ }
+
+ static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
+diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
+index 3db966db83eb..f0fab7676f72 100644
+--- a/drivers/media/i2c/msp3400-driver.c
++++ b/drivers/media/i2c/msp3400-driver.c
+@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
+index 012859e6dc7b..206bd08265a5 100644
+--- a/drivers/media/pci/cx18/cx18-gpio.c
++++ b/drivers/media/pci/cx18/cx18-gpio.c
+@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
+
+ /* Assert */
+ gpio_update(cx, mask, ~active_lo);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
++ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
+
+ /* Deassert */
+ gpio_update(cx, mask, ~active_hi);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
++ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
+ }
+
+ /*
+diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
+index f752f3993687..23372af61ebf 100644
+--- a/drivers/media/pci/ivtv/ivtv-gpio.c
++++ b/drivers/media/pci/ivtv/ivtv-gpio.c
+@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
+ curout = (curout & ~0xF) | 1;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ /* We could use something else for smaller time */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ curout |= 2;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ curdir &= ~0x80;
+@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
+ curout = read_reg(IVTV_REG_GPIO_OUT);
+ curout &= ~(1 << itv->card->xceive_pin);
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+
+ curout |= 1 << itv->card->xceive_pin;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ return 0;
+ }
+
+diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
+index 4cdc6d2be85d..22c0803cbff3 100644
+--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
+@@ -1154,7 +1154,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+- schedule_timeout(msecs_to_jiffies(25));
++ schedule_msec_hrtimeout((25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
+diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
+index d27c6df97566..e9ffc4eeb478 100644
+--- a/drivers/media/pci/ivtv/ivtv-streams.c
++++ b/drivers/media/pci/ivtv/ivtv-streams.c
+@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ time_before(jiffies,
+ then + msecs_to_jiffies(2000))) {
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ }
+
+ /* To convert jiffies to ms, we must multiply by 1000
+diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
+index dc6c4f985911..9dc616a01315 100644
+--- a/drivers/media/radio/radio-mr800.c
++++ b/drivers/media/radio/radio-mr800.c
+@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ retval = -ENODATA;
+ break;
+ }
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
+index 04ed1a5d1177..d593d28dc286 100644
+--- a/drivers/media/radio/radio-tea5777.c
++++ b/drivers/media/radio/radio-tea5777.c
+@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
+ }
+
+ if (wait) {
+- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
++ if (schedule_msec_hrtimeout_interruptible((wait)))
+ return -ERESTARTSYS;
+ }
+
+diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
+index 7412fe1b10c6..92dce75e6ce9 100644
+--- a/drivers/media/radio/tea575x.c
++++ b/drivers/media/radio/tea575x.c
+@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ /* some signal arrived, stop search */
+ tea->val &= ~TEA575X_BIT_SEARCH;
+ snd_tea575x_set_freq(tea);
+diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
+index d6fb2e1a759a..7ac951b84beb 100644
+--- a/drivers/mfd/ucb1x00-core.c
++++ b/drivers/mfd/ucb1x00-core.c
+@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
+ break;
+ /* yield to other processes */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ return UCB_ADC_DAT(val);
+diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
+index 128d5615c804..38e68e98d9cb 100644
+--- a/drivers/misc/sgi-xp/xpc_channel.c
++++ b/drivers/misc/sgi-xp/xpc_channel.c
+@@ -837,7 +837,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+- ret = schedule_timeout(1);
++ ret = schedule_min_hrtimeout();
+ finish_wait(&ch->msg_allocate_wq, &wait);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index 433a14b9f731..4d197a99472b 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work)
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ retry--;
+ }
+
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+index f530a80f5051..76905e410197 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
+@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 55a78eb96961..1c2d0bfbb1e4 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2568,7 +2568,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index d9eea8cfe6cb..48dbe8b63500 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -770,7 +770,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+index 19c442cb93e4..448f41782060 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
+ * doesn't seem to have as many firmware restart cycles...
+ *
+ * As a test, we're sticking in a 1/100s delay here */
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ return 0;
+
+@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
+ IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
+ i = 5000;
+ do {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_uninterruptible((40));
+ /* Todo... wait for sync command ... */
+
+ read_register(priv->net_dev, IPW_REG_INTA, &inta);
+diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
+index 2d1a5c737c6e..0d11dbe1f2e5 100644
+--- a/drivers/parport/ieee1284.c
++++ b/drivers/parport/ieee1284.c
+@@ -208,7 +208,7 @@ int parport_wait_peripheral(struct parport *port,
+ /* parport_wait_event didn't time out, but the
+ * peripheral wasn't actually ready either.
+ * Wait for another 10ms. */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ }
+
+diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
+index 5d41dda6da4e..34705f6b423f 100644
+--- a/drivers/parport/ieee1284_ops.c
++++ b/drivers/parport/ieee1284_ops.c
+@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
+ /* Yield the port for a while. */
+ if (count && dev->port->irq != PARPORT_IRQ_NONE) {
+ parport_release (dev);
+- schedule_timeout_interruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_interruptible((40));
+ parport_claim_or_block (dev);
+ }
+ else
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index a0c95853fd3f..18fd7b1b4ee0 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -809,7 +809,7 @@ static int ips_adjust(void *data)
+ ips_gpu_lower(ips);
+
+ sleep:
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
+ } while (!kthread_should_stop());
+
+ dev_dbg(ips->dev, "ips-adjust thread stopped\n");
+@@ -988,7 +988,7 @@ static int ips_monitor(void *data)
+ seqno_timestamp = get_jiffies_64();
+
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+
+ /* Collect an initial average */
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
+@@ -1015,7 +1015,7 @@ static int ips_monitor(void *data)
+ mchp_samples[i] = mchp;
+ }
+
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ if (kthread_should_stop())
+ break;
+ }
+@@ -1042,7 +1042,7 @@ static int ips_monitor(void *data)
+ * us to reduce the sample frequency if the CPU and GPU are idle.
+ */
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ last_sample_period = IPS_SAMPLE_PERIOD;
+
+ timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
+diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
+index 483c7993516b..fddbaa475066 100644
+--- a/drivers/rtc/rtc-wm8350.c
++++ b/drivers/rtc/rtc-wm8350.c
+@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
+
+ if (!retries) {
+@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
+
+ if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
+@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
+
+ if (rtc_ctrl & WM8350_RTC_ALMSTS)
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 8a739b74cfb7..9e939ee76e72 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -50,6 +50,7 @@ config SCSI_NETLINK
+ config SCSI_MQ_DEFAULT
+ bool "SCSI: use blk-mq I/O path by default"
+ depends on SCSI
++ default y
+ ---help---
+ This option enables the new blk-mq based I/O path for SCSI
+ devices by default. With the option the scsi_mod.use_blk_mq
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 8cbd3c9f0b4c..7e3f9baa4ac6 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+@@ -2255,7 +2255,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
+ }
+ }
+
+- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index c0cdaef4db24..4ed5fc50bdb5 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -5131,7 +5131,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+ tgt_id, lun_id, context);
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
++ schedule_msec_hrtimeout_uninterruptible((20));
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+ }
+ if (cnt) {
+diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
+index d8a376b7882d..9d0e67d98b2c 100644
+--- a/drivers/scsi/snic/snic_scsi.c
++++ b/drivers/scsi/snic/snic_scsi.c
+@@ -2354,7 +2354,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+
+ /* Wait for all the IOs that are entered in Qcmd */
+ while (atomic_read(&snic->ios_inflight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 5d610af6799f..b7a3b2e4b422 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -4646,7 +4646,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
+ if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (schedule_timeout(1))
++ if (schedule_min_hrtimeout())
+ return -EIO;
+ }
+ if (i == timeout) {
+diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+index a173b69e2f92..f7d87a475e17 100644
+--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+@@ -329,7 +329,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
+ schedule();
+ } else {
+ now = jiffies;
+- schedule_timeout(msecs_to_jiffies(tms));
++ schedule_msec_hrtimeout((tms));
+ tms -= jiffies_to_msecs(jiffies - now);
+ if (tms < 0) /* no more wait but may have new event */
+ tms = 0;
+diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
+index 70e0b8623110..04ae8e3f2448 100644
+--- a/drivers/staging/rts5208/rtsx.c
++++ b/drivers/staging/rts5208/rtsx.c
+@@ -507,7 +507,7 @@ static int rtsx_polling_thread(void *__dev)
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
++ schedule_msec_hrtimeout((POLLING_INTERVAL));
+
+ /* lock the device pointers */
+ mutex_lock(&dev->dev_mutex);
+diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
+index 28519754b2f0..a96805bbec5c 100644
+--- a/drivers/staging/speakup/speakup_acntpc.c
++++ b/drivers/staging/speakup/speakup_acntpc.c
+@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
+index 0877b4044c28..627102d048c1 100644
+--- a/drivers/staging/speakup/speakup_apollo.c
++++ b/drivers/staging/speakup/speakup_apollo.c
+@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
+index 3741c0fcf5bb..bff857b4aa5f 100644
+--- a/drivers/staging/speakup/speakup_decext.c
++++ b/drivers/staging/speakup/speakup_decext.c
+@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
+index 303f393d3f2f..d8260b71377d 100644
+--- a/drivers/staging/speakup/speakup_decpc.c
++++ b/drivers/staging/speakup/speakup_decpc.c
+@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (dt_sendchar(ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
+index 2ea22a2eb5f9..1193daa62a3a 100644
+--- a/drivers/staging/speakup/speakup_dectlk.c
++++ b/drivers/staging/speakup/speakup_dectlk.c
+@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
+index f8cb83c9b82e..9da0633c569c 100644
+--- a/drivers/staging/speakup/speakup_dtlk.c
++++ b/drivers/staging/speakup/speakup_dtlk.c
+@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
+index de76183932e1..c4d018692d66 100644
+--- a/drivers/staging/speakup/speakup_keypc.c
++++ b/drivers/staging/speakup/speakup_keypc.c
+@@ -199,7 +199,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -232,7 +232,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies+jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index c06e6a810999..d13bd3c47b7f 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -93,7 +93,7 @@ void spk_do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = synth->procspeech;
+ if (!synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+index 92dceb557886..8b1b1bd083c5 100644
+--- a/drivers/staging/unisys/visornic/visornic_main.c
++++ b/drivers/staging/unisys/visornic/visornic_main.c
+@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (atomic_read(&devdata->usage))
+ break;
+@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
+index 6199d4806193..7c7165f2dad4 100644
+--- a/drivers/video/fbdev/omap/hwa742.c
++++ b/drivers/video/fbdev/omap/hwa742.c
+@@ -926,7 +926,7 @@ static void hwa742_resume(void)
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(5));
++ schedule_msec_hrtimeout((5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+ }
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index c3d49e13643c..84b984986f4d 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -1286,7 +1286,7 @@ static int pxafb_smart_thread(void *arg)
+ mutex_unlock(&fbi->ctrlr_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(30));
++ schedule_msec_hrtimeout((30));
+ }
+
+ pr_debug("%s(): task ending\n", __func__);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index c1618ab9fecf..8e86cc36bcad 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6075,7 +6075,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
+
+ if (flush != BTRFS_RESERVE_NO_FLUSH &&
+ btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ if (delalloc_lock)
+ mutex_lock(&inode->delalloc_mutex);
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 022b19336fee..50e187f64c32 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -89,7 +89,7 @@ static int caching_kthread(void *data)
+ btrfs_release_path(path);
+ root->ino_cache_progress = last;
+ up_read(&fs_info->commit_root_sem);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ goto again;
+ } else
+ continue;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9298324325ed..0250c8e2d875 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -467,7 +467,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ seq_printf(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 21f5aa0b217f..ee9b46394fdf 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
+ #define wait_event_freezekillable_unsafe(wq, condition) \
+ wait_event_killable(wq, condition)
+
++#define pm_freezing (false)
+ #endif /* !CONFIG_FREEZER */
+
+ #endif /* FREEZER_H_INCLUDED */
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index a454b8aeb938..f0a14e08e8e8 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -55,7 +55,11 @@ extern struct cred init_cred;
+ .pid = &init_struct_pid, \
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++#define INIT_TASK_COMM "MuQSS"
++#else
+ #define INIT_TASK_COMM "swapper"
++#endif
+
+ /* Attach to the init_task data structure for proper alignment */
+ #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
+index 627efac73e6d..e25a094c2f81 100644
+--- a/include/linux/ioprio.h
++++ b/include/linux/ioprio.h
+@@ -53,6 +53,8 @@ enum {
+ */
+ static inline int task_nice_ioprio(struct task_struct *task)
+ {
++ if (iso_task(task))
++ return 0;
+ return (task_nice(task) + 20) / 5;
+ }
+
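
The two added lines above pin SCHED_ISO tasks to the highest best-effort I/O priority; every other task keeps the existing nice-to-ioprio mapping. A standalone sketch of the resulting mapping (plain userspace C, not kernel code):

    #include <stdio.h>

    /* Mirror of task_nice_ioprio() above: ISO tasks get best-effort
     * priority 0, everyone else maps nice -20..19 onto ioprio 0..7. */
    static int nice_to_ioprio(int nice, int is_iso)
    {
            if (is_iso)
                    return 0;
            return (nice + 20) / 5;
    }

    int main(void)
    {
            int nice;

            for (nice = -20; nice <= 19; nice += 5)
                    printf("nice %3d -> ioprio %d\n",
                           nice, nice_to_ioprio(nice, 0));
            printf("SCHED_ISO -> ioprio %d\n", nice_to_ioprio(0, 1));
            return 0;
    }
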
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index b161ef8a902e..d297b4ea0099 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -27,6 +27,9 @@
+ #include <linux/signal_types.h>
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
++#ifdef CONFIG_SCHED_MUQSS
++#include <linux/skip_list.h>
++#endif
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -168,13 +171,40 @@ struct task_group;
+
+ extern void scheduler_tick(void);
+
+-#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+-
++#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+ extern long schedule_timeout(long timeout);
+ extern long schedule_timeout_interruptible(long timeout);
+ extern long schedule_timeout_killable(long timeout);
+ extern long schedule_timeout_uninterruptible(long timeout);
+ extern long schedule_timeout_idle(long timeout);
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++extern long schedule_msec_hrtimeout(long timeout);
++extern long schedule_min_hrtimeout(void);
++extern long schedule_msec_hrtimeout_interruptible(long timeout);
++extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
++#else
++static inline long schedule_msec_hrtimeout(long timeout)
++{
++ return schedule_timeout(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_min_hrtimeout(void)
++{
++ return schedule_timeout(1);
++}
++
++static inline long schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
++}
++#endif
++
+ asmlinkage void schedule(void);
+ extern void schedule_preempt_disabled(void);
+
+@@ -544,9 +574,11 @@ struct task_struct {
+ unsigned int flags;
+ unsigned int ptrace;
+
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
++ int on_cpu;
++#endif
+ #ifdef CONFIG_SMP
+ struct llist_node wake_entry;
+- int on_cpu;
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* Current CPU: */
+ unsigned int cpu;
+@@ -571,10 +603,25 @@ struct task_struct {
+ int static_prio;
+ int normal_prio;
+ unsigned int rt_priority;
++#ifdef CONFIG_SCHED_MUQSS
++ int time_slice;
++ u64 deadline;
++ skiplist_node node; /* Skip list node */
++ u64 last_ran;
++ u64 sched_time; /* sched_clock time spent running */
++#ifdef CONFIG_SMT_NICE
++ int smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++#ifdef CONFIG_HOTPLUG_CPU
++ bool zerobound; /* Bound to CPU0 for hotplug */
++#endif
++ unsigned long rt_timeout;
++#else /* CONFIG_SCHED_MUQSS */
+
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+ #endif
+@@ -723,6 +770,10 @@ struct task_struct {
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ u64 utimescaled;
+ u64 stimescaled;
++#endif
++#ifdef CONFIG_SCHED_MUQSS
++ /* Unbanked cpu time */
++ unsigned long utime_ns, stime_ns;
+ #endif
+ u64 gtime;
+ struct prev_cputime prev_cputime;
+@@ -1117,6 +1168,40 @@ struct task_struct {
+ */
+ };
+
++#ifdef CONFIG_SCHED_MUQSS
++#define tsk_seruntime(t) ((t)->sched_time)
++#define tsk_rttimeout(t) ((t)->rt_timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++}
++
++void print_scheduler_version(void);
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return (p->policy == SCHED_ISO);
++}
++#else /* CFS */
++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t) ((t)->rt.timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++ p->nr_cpus_allowed = current->nr_cpus_allowed;
++}
++
++static inline void print_scheduler_version(void)
++{
++ printk(KERN_INFO "CFS CPU scheduler.\n");
++}
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return false;
++}
++#endif /* CONFIG_SCHED_MUQSS */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->pids[PIDTYPE_PID].pid;
+diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+index 3d3a97d9399d..b03197d35524 100644
+--- a/include/linux/sched/nohz.h
++++ b/include/linux/sched/nohz.h
+@@ -6,7 +6,7 @@
+ * This is the interface between the scheduler and nohz/dynticks:
+ */
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ extern void cpu_load_update_nohz_start(void);
+ extern void cpu_load_update_nohz_stop(void);
+ #else
+@@ -23,7 +23,7 @@ static inline void nohz_balance_enter_idle(int cpu) { }
+ static inline void set_cpu_sd_state_idle(void) { }
+ #endif
+
+-#ifdef CONFIG_NO_HZ_COMMON
++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ void calc_load_nohz_start(void);
+ void calc_load_nohz_stop(void);
+ #else
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index 7d64feafc408..43c9d9e50c09 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -20,8 +20,20 @@
+ */
+
+ #define MAX_USER_RT_PRIO 100
++
++#ifdef CONFIG_SCHED_MUQSS
++/* Note different MAX_RT_PRIO */
++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
++
++#define ISO_PRIO (MAX_RT_PRIO)
++#define NORMAL_PRIO (MAX_RT_PRIO + 1)
++#define IDLE_PRIO (MAX_RT_PRIO + 2)
++#define PRIO_LIMIT ((IDLE_PRIO) + 1)
++#else /* CONFIG_SCHED_MUQSS */
+ #define MAX_RT_PRIO MAX_USER_RT_PRIO
+
++#endif /* CONFIG_SCHED_MUQSS */
++
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
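
Under MuQSS the realtime range grows by one slot (the extra level is used as STOP_PRIO in MuQSS.c further down) and the ISO, NORMAL and IDLE levels are stacked directly above it. A throwaway sketch printing the resulting values, reusing only the constants visible in the hunk above (MAX_USER_RT_PRIO = 100):

    #include <stdio.h>

    #define MAX_USER_RT_PRIO 100
    #define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)  /* MuQSS branch above */
    #define ISO_PRIO    (MAX_RT_PRIO)
    #define NORMAL_PRIO (MAX_RT_PRIO + 1)
    #define IDLE_PRIO   (MAX_RT_PRIO + 2)
    #define PRIO_LIMIT  ((IDLE_PRIO) + 1)

    int main(void)
    {
            printf("MAX_RT_PRIO = %d\n", MAX_RT_PRIO); /* 101 */
            printf("ISO_PRIO    = %d\n", ISO_PRIO);    /* 101 */
            printf("NORMAL_PRIO = %d\n", NORMAL_PRIO); /* 102 */
            printf("IDLE_PRIO   = %d\n", IDLE_PRIO);   /* 103 */
            printf("PRIO_LIMIT  = %d\n", PRIO_LIMIT);  /* 104 */
            return 0;
    }
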
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..010b2244e0b6 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return true;
++#ifndef CONFIG_SCHED_MUQSS
+ if (policy == SCHED_DEADLINE)
+ return true;
++#endif
+ return false;
+ }
+
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 5be31eb7b266..2cfc0347db0b 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -80,7 +80,7 @@ extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+ extern void free_task(struct task_struct *tsk);
+
+ /* sched_exec is called by processes performing an exec */
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
+ extern void sched_exec(void);
+ #else
+ #define sched_exec() {}
+diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
+new file mode 100644
+index 000000000000..d4be84ba273b
+--- /dev/null
++++ b/include/linux/skip_list.h
+@@ -0,0 +1,33 @@
++#ifndef _LINUX_SKIP_LISTS_H
++#define _LINUX_SKIP_LISTS_H
++typedef u64 keyType;
++typedef void *valueType;
++
++typedef struct nodeStructure skiplist_node;
++
++struct nodeStructure {
++ int level; /* Levels in this structure */
++ keyType key;
++ valueType value;
++ skiplist_node *next[8];
++ skiplist_node *prev[8];
++};
++
++typedef struct listStructure {
++ int entries;
++ int level; /* Maximum level of the list
++ (1 more than the number of levels in the list) */
++ skiplist_node *header; /* pointer to header */
++} skiplist;
++
++void skiplist_init(skiplist_node *slnode);
++skiplist *new_skiplist(skiplist_node *slnode);
++void free_skiplist(skiplist *l);
++void skiplist_node_init(skiplist_node *node);
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
++void skiplist_delete(skiplist *l, skiplist_node *node);
++
++static inline bool skiplist_node_empty(skiplist_node *node) {
++ return (!node->next[0]);
++}
++#endif /* _LINUX_SKIP_LISTS_H */
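
The header only declares the interface; the implementation lands in kernel/skip_list.c via the kernel/Makefile hunk further down. Below is a usage sketch against these declarations with illustrative key/value choices that are not taken from MuQSS itself; it assumes the conventional ascending key order (lowest key at header->next[0]) and only builds inside a tree carrying the implementation:

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/skip_list.h>

    static skiplist_node example_header;    /* sentinel node */
    static skiplist *example_list;

    static int example_setup(void)
    {
            skiplist_init(&example_header);
            example_list = new_skiplist(&example_header);
            return example_list ? 0 : -ENOMEM;
    }

    /* Insert an entry keyed by deadline, then peek at the earliest:
     * with ascending order the lowest key sits at header->next[0]. */
    static void *example_insert_and_peek(skiplist_node *node, u64 deadline,
                                         void *payload, unsigned int seed)
    {
            skiplist_node_init(node);
            skiplist_insert(example_list, node, deadline, payload, seed);
            return example_list->header->next[0]->value;
    }

    static void example_teardown(skiplist_node *node)
    {
            if (!skiplist_node_empty(node))
                    skiplist_delete(example_list, node);
            free_skiplist(example_list);
    }
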
+diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
+index 22627f80063e..17077cd6fc40 100644
+--- a/include/uapi/linux/sched.h
++++ b/include/uapi/linux/sched.h
+@@ -37,9 +37,16 @@
+ #define SCHED_FIFO 1
+ #define SCHED_RR 2
+ #define SCHED_BATCH 3
+-/* SCHED_ISO: reserved but not implemented yet */
++/* SCHED_ISO: Implemented on MuQSS only */
+ #define SCHED_IDLE 5
++#ifdef CONFIG_SCHED_MUQSS
++#define SCHED_ISO 4
++#define SCHED_IDLEPRIO SCHED_IDLE
++#define SCHED_MAX (SCHED_IDLEPRIO)
++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
++#else /* CONFIG_SCHED_MUQSS */
+ #define SCHED_DEADLINE 6
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+ #define SCHED_RESET_ON_FORK 0x40000000
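
With MuQSS configured, the previously reserved policy value 4 becomes SCHED_ISO while SCHED_DEADLINE disappears. A userspace sketch requesting it; the local define is a fallback because distribution headers predating this patch will not carry SCHED_ISO, and on a non-MuQSS kernel the call is simply expected to fail:

    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #ifndef SCHED_ISO
    #define SCHED_ISO 4     /* value from the MuQSS branch above */
    #endif

    int main(void)
    {
            struct sched_param sp;

            memset(&sp, 0, sizeof(sp)); /* ISO takes no static priority */
            if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
                    fprintf(stderr, "SCHED_ISO unavailable: %s\n",
                            strerror(errno));
                    return 1;
            }
            printf("running as SCHED_ISO\n");
            return 0;
    }

Per the sched_iso_cpu tunable in MuQSS.c further down, ISO tasks run as realtime tasks only up to a CPU percentage (70 by default) before being demoted, which is what makes the policy safe to hand to unprivileged workloads.
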
+diff --git a/init/Kconfig b/init/Kconfig
+index e37f4b2a6445..2a88a43284c1 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -38,6 +38,18 @@ config THREAD_INFO_IN_TASK
+
+ menu "General setup"
+
++config SCHED_MUQSS
++ bool "MuQSS cpu scheduler"
++ select HIGH_RES_TIMERS
++ ---help---
++ The Multiple Queue Skiplist Scheduler for excellent interactivity and
++ responsiveness on the desktop and highly scalable deterministic
++ low latency on any hardware.
++
++ Say Y here.
++ default y
++
++
+ config BROKEN
+ bool
+
+@@ -619,6 +631,7 @@ config NUMA_BALANCING
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION
++ depends on !SCHED_MUQSS
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory when
+@@ -721,9 +734,13 @@ menuconfig CGROUP_SCHED
+ help
+ This feature lets CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+- tasks.
++ tasks. In combination with MuQSS this is purely a STUB that creates the
++ files associated with the CPU controller cgroup, but most of the
++ controls do nothing. This is useful for working in environments and
++ with applications that will only work if this control group is
++ present.
+
+-if CGROUP_SCHED
++if CGROUP_SCHED && !SCHED_MUQSS
+ config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+@@ -830,6 +847,7 @@ config CGROUP_DEVICE
+
+ config CGROUP_CPUACCT
+ bool "Simple CPU accounting controller"
++ depends on !SCHED_MUQSS
+ help
+ Provides a simple controller for monitoring the
+ total CPU consumed by the tasks in a cgroup.
+@@ -936,6 +954,7 @@ endif # NAMESPACES
+
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
++ depends on !SCHED_MUQSS
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index 3ac6e754cf64..a5da207d71cf 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -59,9 +59,17 @@ struct task_struct init_task
+ .stack = init_stack,
+ .usage = ATOMIC_INIT(2),
+ .flags = PF_KTHREAD,
++#ifdef CONFIG_SCHED_MUQSS
++ .prio = NORMAL_PRIO,
++ .static_prio = MAX_PRIO-20,
++ .normal_prio = NORMAL_PRIO,
++ .deadline = 0,
++ .time_slice = 1000000,
++#else
+ .prio = MAX_PRIO - 20,
+ .static_prio = MAX_PRIO - 20,
+ .normal_prio = MAX_PRIO - 20,
++#endif
+ .policy = SCHED_NORMAL,
+ .cpus_allowed = CPU_MASK_ALL,
+ .nr_cpus_allowed= NR_CPUS,
+@@ -70,6 +78,7 @@ struct task_struct init_task
+ .restart_block = {
+ .fn = do_no_restart_syscall,
+ },
++#ifndef CONFIG_SCHED_MUQSS
+ .se = {
+ .group_node = LIST_HEAD_INIT(init_task.se.group_node),
+ },
+@@ -77,6 +86,7 @@ struct task_struct init_task
+ .run_list = LIST_HEAD_INIT(init_task.rt.run_list),
+ .time_slice = RR_TIMESLICE,
+ },
++#endif
+ .tasks = LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/init/main.c b/init/main.c
+index 21efbf6ace93..9eeaa98045b3 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -848,7 +848,6 @@ int __init_or_module do_one_initcall(initcall_t fn)
+ return ret;
+ }
+
+-
+ extern initcall_t __initcall_start[];
+ extern initcall_t __initcall0_start[];
+ extern initcall_t __initcall1_start[];
+@@ -1009,6 +1008,8 @@ static int __ref kernel_init(void *unused)
+
+ rcu_end_inkernel_boot();
+
++ print_scheduler_version();
++
+ if (ramdisk_execute_command) {
+ ret = run_init_process(ramdisk_execute_command);
+ if (!ret)
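
The hook above makes the chosen scheduler identify itself once boot is far enough along for printk. On a CONFIG_SCHED_MUQSS kernel the banner comes from MuQSS.c further down in this patch:

    MuQSS CPU scheduler v0.171 by Con Kolivas.

while a kernel built without it prints the "CFS CPU scheduler." stub from include/linux/sched.h instead, so a quick dmesg grep confirms which scheduler the running kernel carries.
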
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 2a202a846757..1806fcac8f14 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -4,7 +4,8 @@
+
+ choice
+ prompt "Timer frequency"
+- default HZ_250
++ default HZ_100 if SCHED_MUQSS
++ default HZ_250_NODEF if !SCHED_MUQSS
+ help
+ Allows the configuration of the timer frequency. It is customary
+ to have the timer interrupt run at 1000 Hz but 100 Hz may be more
+@@ -19,11 +20,18 @@ choice
+ config HZ_100
+ bool "100 HZ"
+ help
++ 100 Hz is a suitable choice in combination with MuQSS, which does
++ not rely on ticks for rescheduling interrupts and is not HZ-limited
++ for timeouts and sleeps from both the kernel and userspace.
++ This allows us to benefit from the lower overhead and higher
++ throughput of fewer timer ticks.
++
++ Non-MuQSS kernels:
+ 100 Hz is a typical choice for servers, SMP and NUMA systems
+ with lots of processors that may show reduced performance if
+ too many timer interrupts are occurring.
+
+- config HZ_250
++ config HZ_250_NODEF
+ bool "250 HZ"
+ help
+ 250 Hz is a good compromise choice allowing server performance
+@@ -31,7 +39,10 @@ choice
+ on SMP and NUMA systems. If you are going to be using NTSC video
+ or multimedia, selected 300Hz instead.
+
+- config HZ_300
++ 250 Hz is the default choice for the mainline scheduler but not
++ advantageous in combination with MuQSS.
++
++ config HZ_300_NODEF
+ bool "300 HZ"
+ help
+ 300 Hz is a good compromise choice allowing server performance
+@@ -39,7 +50,7 @@ choice
+ on SMP and NUMA systems and exactly dividing by both PAL and
+ NTSC frame rates for video and multimedia work.
+
+- config HZ_1000
++ config HZ_1000_NODEF
+ bool "1000 HZ"
+ help
+ 1000 Hz is the preferred choice for desktop systems and other
+@@ -50,9 +61,9 @@ endchoice
+ config HZ
+ int
+ default 100 if HZ_100
+- default 250 if HZ_250
+- default 300 if HZ_300
+- default 1000 if HZ_1000
++ default 250 if HZ_250_NODEF
++ default 300 if HZ_300_NODEF
++ default 1000 if HZ_1000_NODEF
+
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..1dc79ec7ad09 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,7 +1,7 @@
+
+ choice
+ prompt "Preemption Model"
+- default PREEMPT_NONE
++ default PREEMPT
+
+ config PREEMPT_NONE
+ bool "No Forced Preemption (Server)"
+@@ -17,7 +17,7 @@ config PREEMPT_NONE
+ latencies.
+
+ config PREEMPT_VOLUNTARY
+- bool "Voluntary Kernel Preemption (Desktop)"
++ bool "Voluntary Kernel Preemption (Nothing)"
+ help
+ This option reduces the latency of the kernel by adding more
+ "explicit preemption points" to the kernel code. These new
+@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
+ applications to run more 'smoothly' even when the system is
+ under load.
+
+- Select this if you are building a kernel for a desktop system.
++ Select this for no system in particular (choose Preemptible
++ instead on a desktop if you know what's good for you).
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+diff --git a/kernel/Makefile b/kernel/Makefile
+index f85ae5dfa474..78c2c0cc7508 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o \
+ extable.o params.o \
+ kthread.o sys_ni.o nsproxy.o \
+ notifier.o ksysfs.o cred.o reboot.o \
+- async.o range.o smpboot.o ucount.o
++ async.o range.o smpboot.o ucount.o skip_list.o
+
+ obj-$(CONFIG_MODULES) += kmod.o
+ obj-$(CONFIG_MULTIUSER) += groups.o
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index e2764d767f18..2f85428d22b9 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -114,7 +114,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ */
+ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+- t3 = tsk->se.sum_exec_runtime;
++ t3 = tsk_seruntime(tsk);
+
+ d->cpu_count += t1;
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 995453d9fb55..6156f0cac1c7 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -129,7 +129,7 @@ static void __exit_signal(struct task_struct *tsk)
+ sig->curr_target = next_thread(tsk);
+ }
+
+- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ add_device_randomness((const void*) &tsk_seruntime(tsk),
+ sizeof(unsigned long long));
+
+ /*
+@@ -150,7 +150,7 @@ static void __exit_signal(struct task_struct *tsk)
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++ sig->sum_sched_runtime += tsk_seruntime(tsk);
+ sig->nr_threads--;
+ __unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index 6fc87ccda1d7..3f681501a654 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -107,6 +107,23 @@ config GENERIC_IRQ_RESERVATION_MODE
+ config IRQ_FORCED_THREADING
+ bool
+
++config FORCE_IRQ_THREADING
++ bool "Make IRQ threading compulsory"
++ depends on IRQ_FORCED_THREADING
++ default n
++ ---help---
++
++ Make IRQ threading mandatory for any IRQ handlers that support it
++ instead of being optional and requiring the threadirqs kernel
++ parameter. Instead they can be optionally disabled with the
++ nothreadirqs kernel parameter.
++
++ Enabling this may prevent some architectures from booting when
++ combined with runqueue sharing and MuQSS.
++
++ Enable if you are building for a desktop or low latency system,
++ otherwise say N.
++
+ config SPARSE_IRQ
+ bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
+ ---help---
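
Together with the manage.c hunk below, this inverts the mainline opt-in: with FORCE_IRQ_THREADING=y, force_irqthreads starts out true and handlers that support threading are threaded by default, and booting with the new

    nothreadirqs

command-line parameter restores the unthreaded behaviour, whereas a mainline kernel needs an explicit threadirqs parameter to enable it.
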
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 0f922729bab9..c2e3621d3620 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -24,7 +24,17 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++#ifdef CONFIG_FORCE_IRQ_THREADING
++__read_mostly bool force_irqthreads = true;
++#else
+ __read_mostly bool force_irqthreads;
++#endif
++static int __init setup_noforced_irqthreads(char *arg)
++{
++ force_irqthreads = false;
++ return 0;
++}
++early_param("nothreadirqs", setup_noforced_irqthreads);
+
+ static int __init setup_forced_irqthreads(char *arg)
+ {
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index cd50e99202b0..0a2deebdfbfa 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -410,6 +410,34 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
+ }
+ EXPORT_SYMBOL(kthread_bind);
+
++#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
++extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
++
++/*
++ * new_kthread_bind is a special variant of __kthread_bind_mask.
++ * For new threads to work on MuQSS we want to call do_set_cpus_allowed
++ * without the task_cpu being set and without the task being rescheduled
++ * until it reschedules on its own, so we call __do_set_cpus_allowed
++ * directly, which only changes the cpumask. This is particularly
++ * important for smpboot threads to work.
++ */
++static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
++{
++ unsigned long flags;
++
++ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
++ return;
++
++ /* It's safe because the task is inactive. */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ __do_set_cpus_allowed(p, cpumask_of(cpu));
++ p->flags |= PF_NO_SETAFFINITY;
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++#else
++#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
++#endif
++
+ /**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+@@ -431,7 +459,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ cpu);
+ if (IS_ERR(p))
+ return p;
+- kthread_bind(p, cpu);
++ new_kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
+ set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+index 7c6631e693bc..c9f7d8b8a431 100644
+--- a/kernel/livepatch/transition.c
++++ b/kernel/livepatch/transition.c
+@@ -290,6 +290,12 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
+ return 0;
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++typedef unsigned long rq_flags_t;
++#else
++typedef struct rq_flags rq_flags_t;
++#endif
++
+ /*
+ * Try to safely switch a task to the target patch state. If it's currently
+ * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
+@@ -298,7 +304,7 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
+ static bool klp_try_switch_task(struct task_struct *task)
+ {
+ struct rq *rq;
+- struct rq_flags flags;
++ rq_flags_t flags;
+ int ret;
+ bool success = false;
+ char err_buf[STACK_ERR_BUF_SIZE];
+diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
+index 9210379c0353..2dd2f03843cc 100644
+--- a/kernel/rcu/Kconfig
++++ b/kernel/rcu/Kconfig
+@@ -93,7 +93,7 @@ config CONTEXT_TRACKING
+ config CONTEXT_TRACKING_FORCE
+ bool "Force context tracking"
+ depends on CONTEXT_TRACKING
+- default y if !NO_HZ_FULL
++ default y if !NO_HZ_FULL && !SCHED_MUQSS
+ help
+ The major pre-requirement for full dynticks to work is to
+ support the context tracking subsystem. But there are also
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index e2f9d4feff40..c8197dc6b904 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -16,14 +16,20 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+
+-obj-y += core.o loadavg.o clock.o cputime.o
++ifdef CONFIG_SCHED_MUQSS
++obj-y += MuQSS.o clock.o
++else
++obj-y += core.o loadavg.o clock.o
+ obj-y += idle_task.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
++endif
++obj-y += cputime.o
++obj-y += wait.o wait_bit.o swait.o completion.o idle.o
++obj-$(CONFIG_SMP) += cpupri.o topology.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+ obj-$(CONFIG_MEMBARRIER) += membarrier.o
+diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+new file mode 100644
+index 000000000000..f8b617b27ebf
+--- /dev/null
++++ b/kernel/sched/MuQSS.c
+@@ -0,0 +1,7223 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * kernel/sched/MuQSS.c, was kernel/sched.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ * 2007-04-15 Work begun on replacing all interactivity tuning with a
++ * fair scheduling design by Con Kolivas.
++ * 2007-05-05 Load balancing (smp-nice) and other improvements
++ * by Peter Williams
++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ * Thomas Gleixner, Mike Kravetz
++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
++ * a whole lot of those previous things.
++ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS
++ * scheduler by Con Kolivas.
++ */
++
++#include <linux/sched.h>
++#include <linux/sched/clock.h>
++#include <uapi/linux/sched/types.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/hotplug.h>
++#include <linux/wait_bit.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/binfmts.h>
++#include <linux/context_tracking.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/compat.h>
++#include <linux/skip_list.h>
++
++#include <linux/blkdev.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/prefetch.h>
++#include <linux/profile.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/sched/isolation.h>
++#include <linux/tick.h>
++
++#include <asm/switch_to.h>
++#include <asm/tlb.h>
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++#include "MuQSS.h"
++
++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
++#define rt_task(p) rt_prio((p)->prio)
++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
++ (policy) == SCHED_RR)
++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
++
++#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
++#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
++#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
++
++#define is_iso_policy(policy) ((policy) == SCHED_ISO)
++#define iso_task(p) unlikely(is_iso_policy((p)->policy))
++#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
++
++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
++
++#define ISO_PERIOD (5 * HZ)
++
++#define STOP_PRIO (MAX_RT_PRIO - 1)
++
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate powers of ten for less overhead.
++ */
++#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
++#define JIFFY_NS (1073741824 / HZ)
++#define JIFFY_US (1048576 / HZ)
++#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
++#define HALF_JIFFY_NS (1073741824 / HZ / 2)
++#define HALF_JIFFY_US (1048576 / HZ / 2)
++#define MS_TO_NS(TIME) ((TIME) << 20)
++#define MS_TO_US(TIME) ((TIME) << 10)
++#define NS_TO_MS(TIME) ((TIME) >> 20)
++#define NS_TO_US(TIME) ((TIME) >> 10)
++#define US_TO_NS(TIME) ((TIME) << 10)
++
++#define RESCHED_US (100) /* Reschedule if less than this many μs left */
++
++void print_scheduler_version(void)
++{
++ printk(KERN_INFO "MuQSS CPU scheduler v0.171 by Con Kolivas.\n");
++}
++
++#define RQSHARE_NONE 0
++#define RQSHARE_SMT 1
++#define RQSHARE_MC 2
++#define RQSHARE_SMP 3
++
++/*
++ * This determines what level of runqueue sharing will be done and is
++ * configurable at boot time with the boot parameter rqshare=.
++ */
++static int rqshare __read_mostly = CONFIG_SHARERQ; /* Default RQSHARE_MC */
++
++static int __init set_rqshare(char *str)
++{
++ if (!strncmp(str, "none", 4)) {
++ rqshare = RQSHARE_NONE;
++ return 0;
++ }
++ if (!strncmp(str, "smt", 3)) {
++ rqshare = RQSHARE_SMT;
++ return 0;
++ }
++ if (!strncmp(str, "mc", 2)) {
++ rqshare = RQSHARE_MC;
++ return 0;
++ }
++ if (!strncmp(str, "smp", 2)) {
++ rqshare = RQSHARE_SMP;
++ return 0;
++ }
++ return 1;
++}
++__setup("rqshare=", set_rqshare);
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 6ms.
++ * Tunable via /proc interface.
++ */
++int rr_interval __read_mostly = 6;
++
++/*
++ * Tunable to choose whether to prioritise latency or throughput, simple
++ * binary yes or no
++ */
++int sched_interactive __read_mostly = 1;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run five seconds as real time tasks. This is the total over
++ * all online cpus.
++ */
++int sched_iso_cpu __read_mostly = 70;
++
++/*
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Yield only to better priority/deadline tasks. (default)
++ * 2: Expire timeslice and recalculate deadline.
++ */
++int sched_yield_type __read_mostly = 1;
++
++/*
++ * The relative length of deadline for each priority(nice) level.
++ */
++static int prio_ratios[NICE_WIDTH] __read_mostly;
++
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++ return MS_TO_US(rr_interval);
++}
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifdef CONFIG_SMP
++/*
++ * Total number of runqueues. Equals number of CPUs when there is no runqueue
++ * sharing but is usually less with SMT/MC sharing of runqueues.
++ */
++static int total_runqueues __read_mostly = 1;
++
++static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
++
++struct rq *cpu_rq(int cpu)
++{
++ return &per_cpu(runqueues, (cpu));
++}
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++/*
++ * For asym packing, by default the lower numbered cpu has higher priority.
++ */
++int __weak arch_asym_cpu_priority(int cpu)
++{
++ return -cpu;
++}
++
++int __weak arch_sd_sibling_asym_packing(void)
++{
++ return 0*SD_ASYM_PACKING;
++}
++#else
++struct rq *uprq;
++#endif /* CONFIG_SMP */
++
++#include "stats.h"
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_switch
++# define finish_arch_switch(prev) do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch() do { } while (0)
++#endif
++
++/*
++ * All common locking functions performed on rq->lock. rq->clock is local to
++ * the CPU accessing it so it can be modified just with interrupts disabled
++ * when we're not updating niffies.
++ * Looking up task_rq must be done under rq->lock to be safe.
++ */
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++ /*
++ * Since irq_time is only updated on {soft,}irq_exit, we might run into
++ * this case when a previous update_rq_clock() happened inside a
++ * {soft,}irq region.
++ *
++ * When this happens, we stop ->clock_task and only update the
++ * prev_irq_time stamp to account for the part that fit, so that a next
++ * update will consume the rest. This ensures ->clock_task is
++ * monotonic.
++ *
++	 * It does, however, cause some slight misattribution of {soft,}irq
++	 * time; a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++ */
++ if (irq_delta > delta)
++ irq_delta = delta;
++
++ rq->prev_irq_time += irq_delta;
++ delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ if (static_key_false((&paravirt_steal_rq_enabled))) {
++ s64 steal = paravirt_steal_clock(cpu_of(rq));
++
++ steal -= rq->prev_steal_time_rq;
++
++ if (unlikely(steal > delta))
++ steal = delta;
++
++ rq->prev_steal_time_rq += steal;
++
++ delta -= steal;
++ }
++#endif
++ rq->clock_task += delta;
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++ if (unlikely(delta < 0))
++ return;
++ rq->clock += delta;
++ update_rq_clock_task(rq, delta);
++}
++
++/*
++ * Niffies are a globally increasing nanosecond counter. They're only used by
++ * update_load_avg and time_slice_expired; however, deadlines are based on
++ * them across CPUs. Update them whenever we will call one of those functions,
++ * and synchronise them across CPUs whenever we hold both runqueue locks.
++ */
++static inline void update_clocks(struct rq *rq)
++{
++ s64 ndiff, minndiff;
++ long jdiff;
++
++ update_rq_clock(rq);
++ ndiff = rq->clock - rq->old_clock;
++ rq->old_clock = rq->clock;
++ jdiff = jiffies - rq->last_jiffy;
++
++ /* Subtract any niffies added by balancing with other rqs */
++ ndiff -= rq->niffies - rq->last_niffy;
++ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
++ if (minndiff < 0)
++ minndiff = 0;
++ ndiff = max(ndiff, minndiff);
++ rq->niffies += ndiff;
++ rq->last_niffy = rq->niffies;
++ if (jdiff) {
++ rq->last_jiffy += jdiff;
++ rq->last_jiffy_niffies = rq->niffies;
++ }
++}
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * Any time we have two runqueues locked we use that as an opportunity to
++ * synchronise niffies to the highest value, as idle ticks may have
++ * artificially kept niffies low on one CPU and the true value can only be
++ * the later of the two.
++ */
++static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
++{
++ if (rq1->niffies > rq2->niffies)
++ rq2->niffies = rq1->niffies;
++ else
++ rq1->niffies = rq2->niffies;
++}
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++
++/* For when we know rq1 != rq2 */
++static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ if (rq1 < rq2) {
++ raw_spin_lock(rq1->lock);
++ raw_spin_lock_nested(rq2->lock, SINGLE_DEPTH_NESTING);
++ } else {
++ raw_spin_lock(rq2->lock);
++ raw_spin_lock_nested(rq1->lock, SINGLE_DEPTH_NESTING);
++ }
++}
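++
++/*
++ * [Editor's note] Taking the lower-addressed lock first imposes a global
++ * lock ordering, so two CPUs concurrently calling __double_rq_lock(a, b)
++ * and __double_rq_lock(b, a) cannot ABBA-deadlock on the pair.
++ */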
++
++static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ BUG_ON(!irqs_disabled());
++ if (rq1->lock == rq2->lock) {
++ raw_spin_lock(rq1->lock);
++ __acquire(rq2->lock); /* Fake it out ;) */
++ } else
++ __double_rq_lock(rq1, rq2);
++ synchronise_niffies(rq1, rq2);
++}
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++ __releases(rq1->lock)
++ __releases(rq2->lock)
++{
++ raw_spin_unlock(rq1->lock);
++ if (rq1->lock != rq2->lock)
++ raw_spin_unlock(rq2->lock);
++ else
++ __release(rq2->lock);
++}
++
++static inline void lock_all_rqs(void)
++{
++ int cpu;
++
++ preempt_disable();
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_lock(rq->lock);
++ }
++}
++
++static inline void unlock_all_rqs(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_unlock(rq->lock);
++ }
++ preempt_enable();
++}
++
++/* Specially nest trylock an rq */
++static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
++{
++ if (unlikely(!do_raw_spin_trylock(rq->lock)))
++ return false;
++ spin_acquire(rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++ synchronise_niffies(this_rq, rq);
++ return true;
++}
++
++/* Unlock a specially nested trylocked rq */
++static inline void unlock_rq(struct rq *rq)
++{
++ spin_release(rq->lock.dep_map, 1, _RET_IP_);
++ do_raw_spin_unlock(rq->lock);
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask) \
++ ({ \
++ typeof(ptr) _ptr = (ptr); \
++ typeof(mask) _mask = (mask); \
++ typeof(*_ptr) _old, _val = *_ptr; \
++ \
++ for (;;) { \
++ _old = cmpxchg(_ptr, _val, _val | _mask); \
++ if (_old == _val) \
++ break; \
++ _val = _old; \
++ } \
++ _old; \
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++ for (;;) {
++ if (!(val & _TIF_POLLING_NRFLAG))
++ return false;
++ if (val & _TIF_NEED_RESCHED)
++ return true;
++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++ if (old == val)
++ break;
++ val = old;
++ }
++ return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ return false;
++}
++#endif
++#endif
++
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++ struct wake_q_node *node = &task->wake_q;
++
++ /*
++	 * Atomically grab the task; if ->wake_q is non-nil already, it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++ *
++ * This cmpxchg() implies a full barrier, which pairs with the write
++ * barrier implied by the wakeup in wake_up_q().
++ */
++ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
++ return;
++
++ get_task_struct(task);
++
++ /*
++ * The head is context local, there can be no concurrency.
++ */
++ *head->lastp = node;
++ head->lastp = &node->next;
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++ struct wake_q_node *node = head->first;
++
++ while (node != WAKE_Q_TAIL) {
++ struct task_struct *task;
++
++ task = container_of(node, struct task_struct, wake_q);
++ BUG_ON(!task);
++ /* Task can safely be re-inserted now */
++ node = node->next;
++ task->wake_q.next = NULL;
++
++ /*
++ * wake_up_process() implies a wmb() to pair with the queueing
++ * in wake_q_add() so as not to miss wakeups.
++ */
++ wake_up_process(task);
++ put_task_struct(task);
++ }
++}
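++
++/*
++ * [Editor's sketch, assuming the standard wake_q API of this kernel
++ * generation] Typical usage of the pair above:
++ *
++ *	DEFINE_WAKE_Q(wakeq);
++ *
++ *	wake_q_add(&wakeq, p);	// possibly many times, under a lock
++ *	...
++ *	wake_up_q(&wakeq);	// once, after dropping the lock
++ */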
++
++static inline void smp_sched_reschedule(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ smp_send_reschedule(cpu);
++}
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_task(struct task_struct *p)
++{
++ int cpu;
++#ifdef CONFIG_LOCKDEP
++ /* Kernel threads call this when creating workqueues while still
++ * inactive from __kthread_bind_mask, holding only the pi_lock */
++ if (!(p->flags & PF_KTHREAD)) {
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(rq->lock);
++ }
++#endif
++ if (test_tsk_need_resched(p))
++ return;
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(p))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * A task that is not running or queued will not have a node set.
++ * A task that is queued but not running will have a node set.
++ * A task that is currently running will have ->on_cpu set but no node set.
++ */
++static inline bool task_queued(struct task_struct *p)
++{
++ return !skiplist_node_empty(&p->node);
++}
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
++static inline void resched_if_idle(struct rq *rq);
++
++/* Dodgy workaround till we figure out where the softirqs are going */
++static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
++{
++ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
++ do_softirq_own_stack();
++}
++
++static inline bool deadline_before(u64 deadline, u64 time)
++{
++ return (deadline < time);
++}
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes cpu fairly amongst tasks of the
++ * same nice value, proportions cpu according to nice level, and means the
++ * task that woke up longest ago has the earliest deadline, thus ensuring
++ * that interactive tasks get low latency on wake up. The CPU proportion
++ * works out to the square of the virtual deadline difference, so this
++ * equation will give nice 19 3% CPU compared to nice 0.
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++ return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++ return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++ return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++ return NS_TO_MS(longest_deadline_diff());
++}
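++
++/*
++ * [Editor's worked example, assuming prio_ratios[] starts at 128 for nice
++ * -20 and grows ~10% per nice level, as set up elsewhere in this patch]
++ * With the default rr_interval of 6, prio_deadline_diff() gives roughly:
++ *
++ *	nice -20:  128  * 6 * 8192 ns ~=   6.3 ms
++ *	nice   0: ~861  * 6 * 8192 ns ~=  42 ms
++ *	nice  19: ~5267 * 6 * 8192 ns ~= 259 ms
++ *
++ * Since CPU share goes as the square of the deadline offset ratio,
++ * nice 19 vs nice 0 is ~ (861/5267)^2 ~= 2.7%, matching the "3%" above.
++ */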
++
++static inline bool rq_local(struct rq *rq);
++
++#ifndef SCHED_CAPACITY_SCALE
++#define SCHED_CAPACITY_SCALE 1024
++#endif
++
++static inline int rq_load(struct rq *rq)
++{
++ return rq->nr_running;
++}
++
++/*
++ * Update the load average for feeding into cpu frequency governors. Use a
++ * rough estimate of a rolling average with ~ time constant of 32ms.
++ * The decay factor per interval is 80/128 (~0.63); since
++ * 80 / 32768 / 128 == 5 / 262144, the per-microsecond form used below is
++ * load * us_interval * 5 / 262144.
++ * Make sure a call to update_clocks has been made before calling this to get
++ * an updated rq->niffies.
++ */
++static void update_load_avg(struct rq *rq, unsigned int flags)
++{
++ unsigned long us_interval, curload;
++ long load;
++
++ if (unlikely(rq->niffies <= rq->load_update))
++ return;
++
++ us_interval = NS_TO_US(rq->niffies - rq->load_update);
++ curload = rq_load(rq);
++ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
++ if (unlikely(load < 0))
++ load = 0;
++ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
++ rq->load_avg = load;
++
++ rq->load_update = rq->niffies;
++ if (likely(rq_local(rq)))
++ cpufreq_trigger(rq, flags);
++}
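++
++/*
++ * [Editor's worked check] For one full 32768 us interval the subtracted
++ * fraction is 32768 * 5 / 262144 = 0.625 = 80/128, which is exactly the
++ * 80/128 factor named in the comment above.
++ */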
++
++/*
++ * Removing from the runqueue. Enter with rq locked. Deleting a task
++ * from the skip list is done via the stored node reference in the task struct
++ * and does not require a full look up. Thus it occurs in O(k) time where k
++ * is the "level" of the list the task was stored at - usually < 4, max 8.
++ */
++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ skiplist_delete(rq->sl, &p->node);
++ rq->best_key = rq->node->next[0]->key;
++ update_clocks(rq);
++
++ if (!(flags & DEQUEUE_SAVE))
++ sched_info_dequeued(task_rq(p), p);
++ rq->nr_running--;
++ update_load_avg(rq, flags);
++}
++
++#ifdef CONFIG_PREEMPT_RCU
++static bool rcu_read_critical(struct task_struct *p)
++{
++ return p->rcu_read_unlock_special.b.blocked;
++}
++#else /* CONFIG_PREEMPT_RCU */
++#define rcu_read_critical(p) (false)
++#endif /* CONFIG_PREEMPT_RCU */
++
++/*
++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
++ * an idle task, we ensure none of the following conditions are met.
++ */
++static bool idleprio_suitable(struct task_struct *p)
++{
++ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
++ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
++}
++
++/*
++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
++ * that the iso_refractory flag is not set.
++ */
++static inline bool isoprio_suitable(struct rq *rq)
++{
++ return !rq->iso_refractory;
++}
++
++/*
++ * Adding to the runqueue. Enter with rq locked.
++ */
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ unsigned int randseed, cflags = 0;
++ u64 sl_id;
++
++ if (!rt_task(p)) {
++ /* Check it hasn't gotten rt from PI */
++ if ((idleprio_task(p) && idleprio_suitable(p)) ||
++ (iso_task(p) && isoprio_suitable(rq)))
++ p->prio = p->normal_prio;
++ else
++ p->prio = NORMAL_PRIO;
++ }
++ /*
++ * The sl_id key passed to the skiplist generates a sorted list.
++ * Realtime and sched iso tasks run FIFO so they only need be sorted
++ * according to priority. The skiplist will put tasks of the same
++ * key inserted later in FIFO order. Tasks of sched normal, batch
++ * and idleprio are sorted according to their deadlines. Idleprio
++ * tasks are offset by an impossibly large deadline value ensuring
++ * they get sorted into last positions, but still according to their
++ * own deadlines. This creates a "landscape" of skiplists running
++ * from priority 0 realtime in first place to the lowest priority
++ * idleprio tasks last. Skiplist insertion is an O(log n) process.
++ */
++ if (p->prio <= ISO_PRIO) {
++ sl_id = p->prio;
++ cflags = SCHED_CPUFREQ_RT;
++ } else {
++ sl_id = p->deadline;
++ if (idleprio_task(p)) {
++ if (p->prio == IDLE_PRIO)
++ sl_id |= 0xF000000000000000;
++ else
++ sl_id += longest_deadline_diff();
++ }
++ }
++ /*
++ * Some architectures don't have better than microsecond resolution
++ * so mask out ~microseconds as the random seed for skiplist insertion.
++ */
++ update_clocks(rq);
++ if (!(flags & ENQUEUE_RESTORE))
++ sched_info_queued(rq, p);
++ randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
++ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
++ rq->best_key = rq->node->next[0]->key;
++ if (p->in_iowait)
++ cflags |= SCHED_CPUFREQ_IOWAIT;
++ rq->nr_running++;
++ update_load_avg(rq, cflags);
++}
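++
++/*
++ * [Editor's illustration of the key layout described above] Example sl_id
++ * values, lowest (first to run) to highest:
++ *
++ *	realtime / SCHED_ISO task:	sl_id = p->prio
++ *	SCHED_NORMAL task:		sl_id = p->deadline
++ *	SCHED_IDLEPRIO run as normal:	sl_id = deadline + longest_deadline_diff()
++ *	SCHED_IDLEPRIO at IDLE_PRIO:	sl_id = deadline | 0xF000000000000000
++ */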
++
++/*
++ * Returns the relative length of deadline of a task, compared to the
++ * shortest deadline, which is that of nice -20.
++ */
++static inline int task_prio_ratio(struct task_struct *p)
++{
++ return prio_ratios[TASK_USER_PRIO(p)];
++}
++
++/*
++ * task_timeslice - all tasks of all priorities get the exact same timeslice
++ * length. CPU distribution is handled by giving different deadlines to
++ * tasks of different priorities. Use 128 as the base value for fast shifts.
++ */
++static inline int task_timeslice(struct task_struct *p)
++{
++ return (rr_interval * task_prio_ratio(p) / 128);
++}
++
++#ifdef CONFIG_SMP
++/* Entered with rq locked */
++static inline void resched_if_idle(struct rq *rq)
++{
++ if (rq_idle(rq))
++ resched_task(rq->curr);
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return (rq->cpu == smp_processor_id());
++}
++#ifdef CONFIG_SMT_NICE
++static const cpumask_t *thread_cpumask(int cpu);
++
++/* Find the best real time priority running on any SMT siblings of cpu and if
++ * none are running, the static priority of the best deadline task running.
++ * The lookups to the other runqueues are done locklessly as the occasional
++ * wrong value would be harmless. */
++static int best_smt_bias(struct rq *this_rq)
++{
++ int other_cpu, best_bias = 0;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq = cpu_rq(other_cpu);
++
++ if (rq_idle(rq))
++ continue;
++ if (unlikely(!rq->online))
++ continue;
++ if (!rq->rq_mm)
++ continue;
++ if (likely(rq->rq_smt_bias > best_bias))
++ best_bias = rq->rq_smt_bias;
++ }
++ return best_bias;
++}
++
++static int task_prio_bias(struct task_struct *p)
++{
++ if (rt_task(p))
++ return 1 << 30;
++ else if (task_running_iso(p))
++ return 1 << 29;
++ else if (task_running_idle(p))
++ return 0;
++ return MAX_PRIO - p->static_prio;
++}
++
++static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
++{
++ return true;
++}
++
++static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
++
++/* We've already decided p can run on this CPU; now test whether it
++ * shouldn't, for SMT nice reasons. */
++static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
++{
++ int best_bias, task_bias;
++
++ /* Kernel threads always run */
++ if (unlikely(!p->mm))
++ return true;
++ if (rt_task(p))
++ return true;
++ if (!idleprio_suitable(p))
++ return true;
++ best_bias = best_smt_bias(this_rq);
++ /* The smt siblings are all idle or running IDLEPRIO */
++ if (best_bias < 1)
++ return true;
++ task_bias = task_prio_bias(p);
++ if (task_bias < 1)
++ return false;
++ if (task_bias >= best_bias)
++ return true;
++ /* Dither 25% cpu of normal tasks regardless of nice difference */
++ if (best_bias % 4 == 1)
++ return true;
++ /* Sorry, you lose */
++ return false;
++}
++#else /* CONFIG_SMT_NICE */
++#define smt_schedule(p, this_rq) (true)
++#endif /* CONFIG_SMT_NICE */
++
++static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
++{
++ set_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++/*
++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
++ * allow easy lookup of whether any suitable idle CPUs are available.
++ * It's cheaper to maintain a simple yes/no answer about idle CPUs in this
++ * map than to do a full bitmask check of every runqueue when we are busy.
++ * The bits are set atomically but read locklessly as an occasional false
++ * positive / negative is harmless.
++ */
++static inline void set_cpuidle_map(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ atomic_set_cpu(cpu, &cpu_idle_map);
++}
++
++static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
++{
++ clear_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++ atomic_clear_cpu(cpu, &cpu_idle_map);
++}
++
++static bool suitable_idle_cpus(struct task_struct *p)
++{
++ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
++}
++
++/*
++ * Resched current on rq. We don't know if rq is local to this CPU nor if it
++ * is locked so we do not use an intermediate variable for the task to avoid
++ * having it dereferenced.
++ */
++static void resched_curr(struct rq *rq)
++{
++ int cpu;
++
++ if (test_tsk_need_resched(rq->curr))
++ return;
++
++ rq->preempt = rq->curr;
++ cpu = rq->cpu;
++
++ /* We're doing this without holding the rq lock if it's not task_rq */
++
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(rq->curr);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(rq->curr))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++#define CPUIDLE_DIFF_THREAD (1)
++#define CPUIDLE_DIFF_CORE (2)
++#define CPUIDLE_CACHE_BUSY (4)
++#define CPUIDLE_DIFF_CPU (8)
++#define CPUIDLE_THREAD_BUSY (16)
++#define CPUIDLE_DIFF_NODE (32)
++
++/*
++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
++ * lowest value would give the most suitable CPU to schedule p onto next. The
++ * order works out to be the following:
++ *
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++ */
++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
++{
++ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
++ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
++ CPUIDLE_DIFF_THREAD;
++ int cpu_tmp;
++
++ if (cpumask_test_cpu(best_cpu, tmpmask))
++ goto out;
++
++ for_each_cpu(cpu_tmp, tmpmask) {
++ int ranking, locality;
++ struct rq *tmp_rq;
++
++ ranking = 0;
++ tmp_rq = cpu_rq(cpu_tmp);
++
++ locality = rq->cpu_locality[cpu_tmp];
++#ifdef CONFIG_NUMA
++ if (locality > 3)
++ ranking |= CPUIDLE_DIFF_NODE;
++ else
++#endif
++ if (locality > 2)
++ ranking |= CPUIDLE_DIFF_CPU;
++#ifdef CONFIG_SCHED_MC
++ else if (locality == 2)
++ ranking |= CPUIDLE_DIFF_CORE;
++ else if (!(tmp_rq->cache_idle(tmp_rq)))
++ ranking |= CPUIDLE_CACHE_BUSY;
++#endif
++#ifdef CONFIG_SCHED_SMT
++ if (locality == 1)
++ ranking |= CPUIDLE_DIFF_THREAD;
++ if (!(tmp_rq->siblings_idle(tmp_rq)))
++ ranking |= CPUIDLE_THREAD_BUSY;
++#endif
++ if (ranking < best_ranking) {
++ best_cpu = cpu_tmp;
++ best_ranking = ranking;
++ }
++ }
++out:
++ return best_cpu;
++}
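++
++/*
++ * [Editor's worked example] The flag values make rankings additive, e.g. a
++ * busy SMT sibling scores CPUIDLE_DIFF_THREAD | CPUIDLE_THREAD_BUSY =
++ * 1 + 16 = 17, while an idle core sharing the cache scores only
++ * CPUIDLE_DIFF_CORE = 2, so the idle core wins.
++ */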
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++ struct rq *this_rq = cpu_rq(this_cpu);
++
++ return (this_rq->cpu_locality[that_cpu] < 3);
++}
++
++/* As per resched_curr but only will resched idle task */
++static inline void resched_idle(struct rq *rq)
++{
++ if (test_tsk_need_resched(rq->idle))
++ return;
++
++ rq->preempt = rq->idle;
++
++ set_tsk_need_resched(rq->idle);
++
++ if (rq_local(rq)) {
++ set_preempt_need_resched();
++ return;
++ }
++
++ smp_sched_reschedule(rq->cpu);
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ cpumask_t tmpmask;
++ struct rq *rq;
++ int best_cpu;
++
++ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
++ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
++ rq = cpu_rq(best_cpu);
++ if (!smt_schedule(p, rq))
++ return NULL;
++ rq->preempt = p;
++ resched_idle(rq);
++ return rq;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq->rq_order[cpu];
++}
++#else /* CONFIG_SMP */
++static inline void set_cpuidle_map(int cpu)
++{
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++}
++
++static inline bool suitable_idle_cpus(struct task_struct *p)
++{
++ return uprq->curr == uprq->idle;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++}
++
++static inline void resched_curr(struct rq *rq)
++{
++ resched_task(rq->curr);
++}
++
++static inline void resched_if_idle(struct rq *rq)
++{
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return true;
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq;
++}
++
++static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
++{
++ return true;
++}
++#endif /* CONFIG_SMP */
++
++static inline int normal_prio(struct task_struct *p)
++{
++ if (has_rt_policy(p))
++ return MAX_RT_PRIO - 1 - p->rt_priority;
++ if (idleprio_task(p))
++ return IDLE_PRIO;
++ if (iso_task(p))
++ return ISO_PRIO;
++ return NORMAL_PRIO;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue. Enter with rq locked.
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++ resched_if_idle(rq);
++
++ /*
++ * Sleep time is in units of nanosecs, so shift by 20 to get a
++ * milliseconds-range estimation of the amount of time that the task
++ * spent sleeping:
++ */
++ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ if (p->state == TASK_UNINTERRUPTIBLE)
++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
++ (rq->niffies - p->last_ran) >> 20);
++ }
++
++ p->prio = effective_prio(p);
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++
++ enqueue_task(rq, p, 0);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++/*
++ * deactivate_task - If it's running, it's not on the runqueue and we can just
++ * decrement the nr_running. Enter with rq locked.
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++
++ p->on_rq = 0;
++ sched_info_dequeued(rq, p);
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++ struct rq *rq;
++
++ if (task_cpu(p) == new_cpu)
++ return;
++
++ /* Do NOT call set_task_cpu on a currently queued task as we will not
++ * be reliably holding the rq lock after changing CPU. */
++ BUG_ON(task_queued(p));
++ rq = task_rq(p);
++
++#ifdef CONFIG_LOCKDEP
++ /*
++ * The caller should hold either p->pi_lock or rq->lock, when changing
++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++ *
++ * Furthermore, all task_rq users should acquire both locks, see
++ * task_rq_lock().
++ */
++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++ lockdep_is_held(rq->lock)));
++#endif
++
++ trace_sched_migrate_task(p, new_cpu);
++ perf_event_task_migrate(p);
++
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfully executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
++
++ p->wake_cpu = new_cpu;
++
++ if (task_running(rq, p)) {
++ /*
++ * We should only be calling this on a running task if we're
++ * holding rq lock.
++ */
++ lockdep_assert_held(rq->lock);
++
++ /*
++ * We can't change the task_thread_info CPU on a running task
++ * as p will still be protected by the rq lock of the CPU it
++ * is still running on so we only set the wake_cpu for it to be
++ * lazily updated once off the CPU.
++ */
++ return;
++ }
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ p->cpu = new_cpu;
++#else
++ task_thread_info(p)->cpu = new_cpu;
++#endif
++ /* We're no longer protecting p after this point since we're holding
++ * the wrong runqueue lock. */
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Move a task off the runqueue and take it to a cpu where it will
++ * become the running task.
++ */
++static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
++{
++ struct rq *p_rq = task_rq(p);
++
++ dequeue_task(p_rq, p, DEQUEUE_SAVE);
++ if (p_rq != rq) {
++ sched_info_dequeued(p_rq, p);
++ sched_info_queued(rq, p);
++ }
++ set_task_cpu(p, cpu);
++}
++
++/*
++ * Returns a descheduling task to the runqueue unless it is being
++ * deactivated.
++ */
++static inline void return_task(struct task_struct *p, struct rq *rq,
++ int cpu, bool deactivate)
++{
++ if (deactivate)
++ deactivate_task(p, rq);
++ else {
++#ifdef CONFIG_SMP
++		/*
++		 * set_task_cpu was called on the running task that doesn't
++		 * want to deactivate, so it has to be enqueued on a different
++		 * CPU and we need that CPU's lock. Tag it to be moved, with
++		 * the move happening once the lock is dropped in
++		 * finish_lock_switch.
++		 */
++ if (unlikely(p->wake_cpu != cpu))
++ p->on_rq = TASK_ON_RQ_MIGRATING;
++ else
++#endif
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ }
++}
++
++/* Enter with rq lock held. We know p is on the local cpu */
++static inline void __set_tsk_resched(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ int running, queued;
++ unsigned long flags;
++ unsigned long ncsw;
++ struct rq *rq;
++
++ for (;;) {
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since this will return false
++ * if the runqueue has changed and p is actually now
++ * running somewhere else!
++ */
++ while (task_running(rq, p)) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ trace_sched_wait_task(p);
++ running = task_running(rq, p);
++ queued = task_on_rq_queued(p);
++ ncsw = 0;
++ if (!match_state || p->state == match_state)
++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++ task_rq_unlock(rq, p, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(queued)) {
++ ktime_t to = NSEC_PER_SEC / HZ;
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_sched_reschedule(cpu);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++#endif
++
++/*
++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
++ * between themselves, they cooperatively multitask. An idle rq scores as
++ * prio PRIO_LIMIT so it is always preempted.
++ */
++static inline bool
++can_preempt(struct task_struct *p, int prio, u64 deadline)
++{
++ /* Better static priority RT task or better policy preemption */
++ if (p->prio < prio)
++ return true;
++ if (p->prio > prio)
++ return false;
++ if (p->policy == SCHED_BATCH)
++ return false;
++ /* SCHED_NORMAL and ISO will preempt based on deadline */
++ if (!deadline_before(p->deadline, deadline))
++ return false;
++ return true;
++}
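++
++/*
++ * [Editor's illustration] At equal priority a SCHED_NORMAL wakee with
++ * deadline 100 preempts a runner with deadline 200 (earlier deadline
++ * wins), SCHED_BATCH never preempts at equal priority, and nothing here
++ * preempts a better (lower) prio value such as an RT or ISO task.
++ */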
++
++#ifdef CONFIG_SMP
++/*
++ * Check to see if p can run on cpu, and if not, whether there are any online
++ * CPUs it can run on instead. This only happens with the hotplug threads that
++ * bring up the CPUs.
++ */
++static inline bool sched_other_cpu(struct task_struct *p, int cpu)
++{
++ if (likely(cpumask_test_cpu(cpu, &p->cpus_allowed)))
++ return false;
++ if (p->nr_cpus_allowed == 1) {
++ cpumask_t valid_mask;
++
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask);
++ if (unlikely(cpumask_empty(&valid_mask)))
++ return false;
++ }
++ return true;
++}
++
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ if (cpumask_test_cpu(cpu, &p->cpus_allowed))
++ return false;
++ return true;
++}
++
++#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ int i, this_entries = rq_load(this_rq);
++ cpumask_t tmp;
++
++ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
++ return;
++
++ /* IDLEPRIO tasks never preempt anything but idle */
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++
++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *rq = this_rq->cpu_order[i];
++
++ if (!cpumask_test_cpu(rq->cpu, &tmp))
++ continue;
++
++ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
++ continue;
++ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
++ /* We set rq->preempting lockless, it's a hint only */
++ rq->preempting = p;
++ resched_curr(rq);
++ return;
++ }
++ }
++}
++
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check);
++#else /* CONFIG_SMP */
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ return false;
++}
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
++ resched_curr(uprq);
++}
++
++static inline int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ return set_cpus_allowed_ptr(p, new_mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * wake flags
++ */
++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
++#define WF_FORK 0x02 /* child wakeup after fork */
++#define WF_MIGRATED 0x04 /* internal use, task got migrated */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq;
++
++ if (!schedstat_enabled())
++ return;
++
++ rq = this_rq();
++
++#ifdef CONFIG_SMP
++ if (cpu == rq->cpu) {
++ __schedstat_inc(rq->ttwu_local);
++ } else {
++ struct sched_domain *sd;
++
++ rcu_read_lock();
++ for_each_domain(rq->cpu, sd) {
++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
++ __schedstat_inc(sd->ttwu_wake_remote);
++ break;
++ }
++ }
++ rcu_read_unlock();
++ }
++
++#endif /* CONFIG_SMP */
++
++ __schedstat_inc(rq->ttwu_count);
++}
++
++static inline void ttwu_activate(struct rq *rq, struct task_struct *p)
++{
++ activate_task(p, rq);
++
++ /* if a worker is waking up, notify the workqueue */
++ if (p->flags & PF_WQ_WORKER)
++ wq_worker_waking_up(p, cpu_of(rq));
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ /*
++ * Sync wakeups (i.e. those types of wakeups where the waker
++ * has indicated that it will leave the CPU in short order)
++ * don't trigger a preemption if there are no idle cpus,
++ * instead waiting for current to deschedule.
++ */
++ if (wake_flags & WF_SYNC)
++ resched_suitable_idle(p);
++ else
++ try_preempt(p, rq);
++ p->state = TASK_RUNNING;
++ trace_sched_wakeup(p);
++}
++
++static void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ lockdep_assert_held(rq->lock);
++
++#ifdef CONFIG_SMP
++ if (p->sched_contributes_to_load)
++ rq->nr_uninterruptible--;
++#endif
++
++ ttwu_activate(rq, p);
++ ttwu_do_wakeup(rq, p, wake_flags);
++}
++
++/*
++ * Called in case the task @p isn't fully descheduled from its runqueue;
++ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
++ * since all we need to do is flip p->state to TASK_RUNNING: the task is
++ * still ->on_rq.
++ */
++static int ttwu_remote(struct task_struct *p, int wake_flags)
++{
++ struct rq *rq;
++ int ret = 0;
++
++ rq = __task_rq_lock(p);
++ if (likely(task_on_rq_queued(p))) {
++ ttwu_do_wakeup(rq, p, wake_flags);
++ ret = 1;
++ }
++ __task_rq_unlock(rq);
++
++ return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void)
++{
++ struct rq *rq = this_rq();
++ struct llist_node *llist = llist_del_all(&rq->wake_list);
++ struct task_struct *p, *t;
++ unsigned long flags;
++
++ if (!llist)
++ return;
++
++ rq_lock_irqsave(rq, &flags);
++
++ llist_for_each_entry_safe(p, t, llist, wake_entry)
++ ttwu_do_activate(rq, p, 0);
++
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++void scheduler_ipi(void)
++{
++ /*
++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
++ * TIF_NEED_RESCHED remotely (for the first time) will also send
++ * this IPI.
++ */
++ preempt_fold_need_resched();
++
++ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
++ return;
++
++ /*
++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
++ * traditionally all their work was done from the interrupt return
++ * path. Now that we actually do some work, we need to make sure
++ * we do call them.
++ *
++ * Some archs already do call them, luckily irq_enter/exit nest
++ * properly.
++ *
++ * Arguably we should visit all archs and update all handlers,
++ * however a fair share of IPIs are still resched only so this would
++ * somewhat pessimize the simple resched case.
++ */
++ irq_enter();
++ sched_ttwu_pending();
++ irq_exit();
++}
++
++static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
++ if (!set_nr_if_polling(rq->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++ }
++}
++
++void wake_up_if_idle(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rcu_read_lock();
++
++ if (!is_idle_task(rcu_dereference(rq->curr)))
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ rq_lock_irqsave(rq, &flags);
++ if (likely(is_idle_task(rq->curr)))
++ smp_sched_reschedule(cpu);
++ /* Else cpu is not in idle, do nothing here */
++ rq_unlock_irqrestore(rq, &flags);
++ }
++
++out:
++ rcu_read_unlock();
++}
++
++static int valid_task_cpu(struct task_struct *p)
++{
++ cpumask_t valid_mask;
++
++ if (p->flags & PF_KTHREAD)
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_all_mask);
++ else
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask);
++
++ if (unlikely(!cpumask_weight(&valid_mask))) {
++ /* We shouldn't be hitting this any more */
++ printk(KERN_WARNING "SCHED: No cpumask for %s/%d weight %d\n", p->comm,
++ p->pid, cpumask_weight(&p->cpus_allowed));
++ return cpumask_any(&p->cpus_allowed);
++ }
++ return cpumask_any(&valid_mask);
++}
++
++/*
++ * For a task that's just being woken up we have a valuable balancing
++ * opportunity so choose the most lightly loaded runqueue with the nearest
++ * cache. Entered with rq locked and returns with the chosen runqueue locked.
++ */
++static inline int select_best_cpu(struct task_struct *p)
++{
++ unsigned int idlest = ~0U;
++ struct rq *rq = NULL;
++ int i;
++
++ if (suitable_idle_cpus(p)) {
++ int cpu = task_cpu(p);
++
++ if (unlikely(needs_other_cpu(p, cpu)))
++ cpu = valid_task_cpu(p);
++ rq = resched_best_idle(p, cpu);
++ if (likely(rq))
++ return rq->cpu;
++ }
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *other_rq = task_rq(p)->cpu_order[i];
++ int entries;
++
++ if (!other_rq->online)
++ continue;
++ if (needs_other_cpu(p, other_rq->cpu))
++ continue;
++ entries = rq_load(other_rq);
++ if (entries >= idlest)
++ continue;
++ idlest = entries;
++ rq = other_rq;
++ }
++ if (unlikely(!rq))
++ return task_cpu(p);
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static int valid_task_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static inline int select_best_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ return NULL;
++}
++#endif /* CONFIG_SMP */
++
++static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++#if defined(CONFIG_SMP)
++ if (!cpus_share_cache(smp_processor_id(), cpu)) {
++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++ ttwu_queue_remote(p, cpu, wake_flags);
++ return;
++ }
++#endif
++ rq_lock(rq);
++ ttwu_do_activate(rq, p, wake_flags);
++ rq_unlock(rq);
++}
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running.
++ * or @state didn't match @p's state.
++ */
++static int
++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++ unsigned long flags;
++ int cpu, success = 0;
++
++ /*
++ * If we are going to wake up a thread waiting for CONDITION we
++ * need to ensure that CONDITION=1 done by the caller can not be
++ * reordered with p->state check below. This pairs with mb() in
++ * set_current_state() the waiting thread does.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ smp_mb__after_spinlock();
++	/* state is a volatile long; why, I don't understand */
++ if (!((unsigned int)p->state & state))
++ goto out;
++
++ trace_sched_waking(p);
++
++ /* We're going to change ->state: */
++ success = 1;
++ cpu = task_cpu(p);
++
++ /*
++ * Ensure we load p->on_rq _after_ p->state, otherwise it would
++ * be possible to, falsely, observe p->on_rq == 0 and get stuck
++ * in smp_cond_load_acquire() below.
++ *
++ * sched_ttwu_pending() try_to_wake_up()
++	 *   [S] p->on_rq = 1;				[L] p->state
++ * UNLOCK rq->lock -----.
++ * \
++ * +--- RMB
++ * schedule() /
++ * LOCK rq->lock -----'
++ * UNLOCK rq->lock
++ *
++ * [task p]
++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
++ *
++ * Pairs with the UNLOCK+LOCK on rq->lock from the
++ * last wakeup of our task and the schedule that got our task
++ * current.
++ */
++ smp_rmb();
++ if (p->on_rq && ttwu_remote(p, wake_flags))
++ goto stat;
++
++#ifdef CONFIG_SMP
++ /*
++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++ * possible to, falsely, observe p->on_cpu == 0.
++ *
++ * One must be running (->on_cpu == 1) in order to remove oneself
++ * from the runqueue.
++ *
++ * [S] ->on_cpu = 1; [L] ->on_rq
++ * UNLOCK rq->lock
++ * RMB
++ * LOCK rq->lock
++ * [S] ->on_rq = 0; [L] ->on_cpu
++ *
++ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
++ * from the consecutive calls to schedule(); the first switching to our
++ * task, the second putting it to sleep.
++ */
++ smp_rmb();
++
++ /*
++ * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++ *
++ * Pairs with the smp_store_release() in finish_task().
++ *
++ * This ensures that tasks getting woken will be fully ordered against
++ * their previous state and preserve Program Order.
++ */
++ smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++ p->sched_contributes_to_load = !!task_contributes_to_load(p);
++ p->state = TASK_WAKING;
++
++ if (p->in_iowait) {
++ delayacct_blkio_end(p);
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++ cpu = select_best_cpu(p);
++ if (task_cpu(p) != cpu)
++ set_task_cpu(p, cpu);
++
++#else /* CONFIG_SMP */
++
++ if (p->in_iowait) {
++ delayacct_blkio_end(p);
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++#endif /* CONFIG_SMP */
++
++ ttwu_queue(p, cpu, wake_flags);
++stat:
++ ttwu_stat(p, cpu, wake_flags);
++out:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with rq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that rq is locked and that @p is not the current task.
++ * rq stays locked over invocation.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++ struct rq *rq = task_rq(p);
++
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
++ lockdep_assert_held(rq->lock);
++
++ if (!raw_spin_trylock(&p->pi_lock)) {
++ /*
++ * This is OK, because current is on_cpu, which avoids it being
++ * picked for load-balance and preemption/IRQs are still
++ * disabled avoiding further scheduler activity on it and we've
++ * not yet picked a replacement task.
++ */
++ rq_unlock(rq);
++ raw_spin_lock(&p->pi_lock);
++ rq_lock(rq);
++ }
++
++ if (!(p->state & TASK_NORMAL))
++ goto out;
++
++ trace_sched_waking(p);
++
++ if (!task_on_rq_queued(p)) {
++ if (p->in_iowait) {
++ delayacct_blkio_end(p);
++ atomic_dec(&rq->nr_iowait);
++ }
++ ttwu_activate(rq, p);
++ }
++
++ ttwu_do_wakeup(rq, p, 0);
++ ttwu_stat(p, smp_processor_id(), 0);
++out:
++ raw_spin_unlock(&p->pi_lock);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++static void time_slice_expired(struct task_struct *p, struct rq *rq);
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
++{
++ unsigned long flags;
++ int cpu = get_cpu();
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++ /*
++ * We mark the process as NEW here. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_NEW;
++
++ /* Should be reset in fork.c but done here for ease of MuQSS patching */
++ p->on_cpu =
++ p->on_rq =
++ p->utime =
++ p->stime =
++ p->sched_time =
++ p->stime_ns =
++ p->utime_ns = 0;
++ skiplist_node_init(&p->node);
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */
++ if (unlikely(p->sched_reset_on_fork)) {
++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
++ p->policy = SCHED_NORMAL;
++ p->normal_prio = normal_prio(p);
++ }
++
++ if (PRIO_TO_NICE(p->static_prio) < 0) {
++ p->static_prio = NICE_TO_PRIO(0);
++ p->normal_prio = p->static_prio;
++ }
++
++ /*
++ * We don't need the reset flag anymore after the fork. It has
++ * fulfilled its duty:
++ */
++ p->sched_reset_on_fork = 0;
++ }
++
++ /*
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ set_task_cpu(p, cpu);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++ if (unlikely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++ init_task_preempt_count(p);
++
++ put_cpu();
++ return 0;
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++ if (enabled)
++ static_branch_enable(&sched_schedstats);
++ else
++ static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++ if (!schedstat_enabled()) {
++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++ static_branch_enable(&sched_schedstats);
++ }
++}
++
++static int __init setup_schedstats(char *str)
++{
++ int ret = 0;
++ if (!str)
++ goto out;
++
++ /*
++ * This code is called before jump labels have been set up, so we can't
++ * change the static branch directly just yet. Instead set a temporary
++ * variable so init_schedstats() can do it later.
++ */
++ if (!strcmp(str, "enable")) {
++ __sched_schedstats = true;
++ ret = 1;
++ } else if (!strcmp(str, "disable")) {
++ __sched_schedstats = false;
++ ret = 1;
++ }
++out:
++ if (!ret)
++ pr_warn("Unable to parse schedstats=\n");
++
++ return ret;
++}
++__setup("schedstats=", setup_schedstats);
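++
++/*
++ * [Editor's note, illustrative] Booting with "schedstats=enable" makes
++ * init_schedstats() switch the static branch on; the same knob is then
++ * reachable at runtime through the kernel.sched_schedstats sysctl below.
++ */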
++
++static void __init init_schedstats(void)
++{
++ set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table t;
++ int err;
++ int state = static_branch_likely(&sched_schedstats);
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ t = *table;
++ t.data = &state;
++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++ if (err < 0)
++ return err;
++ if (write)
++ set_schedstats(state);
++ return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
++
++static void account_task_cpu(struct rq *rq, struct task_struct *p)
++{
++ update_clocks(rq);
++ /* This isn't really a context switch but accounting is the same */
++ update_cpu_clock_switch(rq, p);
++ p->last_ran = rq->niffies;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++static inline int hrexpiry_enabled(struct rq *rq)
++{
++ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
++ return 0;
++ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
++}
++
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++static inline void hrexpiry_clear(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (hrtimer_active(&rq->hrexpiry_timer))
++ hrtimer_cancel(&rq->hrexpiry_timer);
++}
++
++/*
++ * High-resolution time_slice expiry.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
++ struct task_struct *p;
++
++ /* This can happen during CPU hotplug / resume */
++ if (unlikely(cpu_of(rq) != smp_processor_id()))
++ goto out;
++
++ /*
++ * We're doing this without the runqueue lock but this should always
++ * be run on the local CPU. Time slice should run out in __schedule
++ * but we set it to zero here in case niffies is slightly less.
++ */
++ p = rq->curr;
++ p->time_slice = 0;
++ __set_tsk_resched(p);
++out:
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * Called to set the hrexpiry timer state.
++ *
++ * Called with irqs disabled, from the local CPU only.
++ */
++static void hrexpiry_start(struct rq *rq, u64 delay)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++
++ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
++ HRTIMER_MODE_REL_PINNED);
++}
++
++static void init_rq_hrexpiry(struct rq *rq)
++{
++ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrexpiry_timer.function = hrexpiry;
++}
++
++static inline int rq_dither(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return HALF_JIFFY_US;
++ return 0;
++}
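++
++/*
++ * [Editor's worked example] Without hres expiry the scheduler falls back to
++ * tick granularity, so rq_dither() allows half a jiffy of slack: at HZ=250,
++ * HALF_JIFFY_US = 1048576 / 250 / 2 = 2097 us.
++ */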
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++ struct task_struct *parent, *rq_curr;
++ struct rq *rq, *new_rq;
++ unsigned long flags;
++
++ parent = p->parent;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ p->state = TASK_RUNNING;
++ /* Task_rq can't change yet on a new task */
++ new_rq = rq = task_rq(p);
++ if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
++ set_task_cpu(p, valid_task_cpu(p));
++ new_rq = task_rq(p);
++ }
++
++ double_rq_lock(rq, new_rq);
++ rq_curr = rq->curr;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child.
++ */
++ p->prio = rq_curr->normal_prio;
++
++ trace_sched_wakeup_new(p);
++
++ /*
++ * Share the timeslice between parent and child, thus the
++ * total amount of pending timeslices in the system doesn't change,
++ * resulting in more scheduling fairness. If it's negative, it won't
++ * matter since that's the same as being 0. rq->rq_deadline is only
++ * modified within schedule() so it is always equal to
++ * current->deadline.
++ */
++ account_task_cpu(rq, rq_curr);
++ p->last_ran = rq_curr->last_ran;
++ if (likely(rq_curr->policy != SCHED_FIFO)) {
++ rq_curr->time_slice /= 2;
++ if (rq_curr->time_slice < RESCHED_US) {
++ /*
++ * Forking task has run out of timeslice. Reschedule it and
++ * start its child with a new time slice and deadline. The
++ * child will end up running first because its deadline will
++ * be slightly earlier.
++ */
++ __set_tsk_resched(rq_curr);
++ time_slice_expired(p, new_rq);
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++ else if (unlikely(rq != new_rq))
++ try_preempt(p, new_rq);
++ } else {
++ p->time_slice = rq_curr->time_slice;
++ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
++ /*
++ * The VM isn't cloned, so we're in a good position to
++ * do child-runs-first in anticipation of an exec. This
++ * usually avoids a lot of COW overhead.
++ */
++ __set_tsk_resched(rq_curr);
++ } else {
++ /*
++ * Adjust the hrexpiry since rq_curr will keep
++ * running and its timeslice has been shortened.
++ */
++ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
++ try_preempt(p, new_rq);
++ }
++ }
++ } else {
++ time_slice_expired(p, new_rq);
++ try_preempt(p, new_rq);
++ }
++ activate_task(p, new_rq);
++ double_rq_unlock(rq, new_rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
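++/*
++ * A worked example of the timeslice split above, assuming a default
++ * rr_interval of 6ms (a full slice of 6000us) and RESCHED_US of 100us:
++ * a parent with 4000us left is cut to 2000us and the child inherits the
++ * other 2000us, so no new slice enters the system. If the parent had only
++ * 150us left, halving leaves 75us < RESCHED_US, so the parent is marked
++ * to reschedule and the child instead starts with a fresh slice and
++ * deadline from time_slice_expired().
++ */
++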
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
++
++void preempt_notifier_inc(void)
++{
++ static_key_slow_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++ static_key_slow_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ if (!static_key_false(&preempt_notifier_key))
++ WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++ hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++ /*
++ * Claim the task as running, we do this before switching to it
++ * such that any running task will have this set.
++ */
++ next->on_cpu = 1;
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->on_cpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ *
++ * In particular, the load of prev->state in finish_task_switch() must
++ * happen before this.
++ *
++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++ */
++ smp_store_release(&prev->on_cpu, 0);
++#endif
++}
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++ /*
++ * The runqueue lock will be released by the next
++ * task (which is an invalid locking op but in the case
++ * of the scheduler it's an obvious special-case), so we
++ * do an early lockdep release here:
++ */
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++
++#ifdef CONFIG_SMP
++ /*
++ * If prev was marked as migrating to another CPU in return_task, drop
++ * the local runqueue lock but leave interrupts disabled and grab the
++ * remote lock we're migrating it to before enabling them.
++ */
++ if (unlikely(task_on_rq_migrating(prev))) {
++ sched_info_dequeued(rq, prev);
++ /*
++ * We move the ownership of prev to the new cpu now. ttwu can't
++ * activate prev to the wrong cpu since it has to grab this
++ * runqueue in ttwu_remote.
++ */
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ prev->cpu = prev->wake_cpu;
++#else
++ task_thread_info(prev)->cpu = prev->wake_cpu;
++#endif
++ raw_spin_unlock(&rq->lock);
++
++ raw_spin_lock(&prev->pi_lock);
++ rq = __task_rq_lock(prev);
++ /* Check that someone else hasn't already queued prev */
++ if (likely(!task_queued(prev))) {
++ enqueue_task(rq, prev, 0);
++ prev->on_rq = TASK_ON_RQ_QUEUED;
++ /* Wake up the CPU if it's not already running */
++ resched_if_idle(rq);
++ }
++ raw_spin_unlock(&prev->pi_lock);
++ }
++#endif
++ rq_unlock(rq);
++
++ do_pending_softirq(rq, current);
++
++ local_irq_enable();
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ sched_info_switch(rq, prev, next);
++ perf_event_task_sched_out(prev, next);
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_task(next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static void finish_task_switch(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ /*
++ * The previous task will have left us with a preempt_count of 2
++ * because it left us after:
++ *
++ * schedule()
++ * preempt_disable(); // 1
++ * __schedule()
++ * raw_spin_lock_irq(rq->lock) // 2
++ *
++ * Also, see FORK_PREEMPT_COUNT.
++ */
++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++ "corrupted preempt_count: %s/%d/0x%x\n",
++ current->comm, current->pid, preempt_count()))
++ preempt_count_set(FORK_PREEMPT_COUNT);
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_task), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ finish_task(prev);
++ finish_lock_switch(rq, prev);
++ finish_arch_post_lock_switch();
++
++ fire_sched_in_preempt_notifiers(current);
++ /*
++ * When switching through a kernel thread, the loop in
++ * membarrier_{private,global}_expedited() may have observed that
++ * kernel thread and not issued an IPI. It is therefore possible to
++ * schedule between user->kernel->user threads without passing through
++ * switch_mm(). Membarrier requires a barrier after storing to
++ * rq->curr, before returning to userspace, so provide them here:
++ *
++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++ * provided by mmdrop(),
++ * - a sync_core for SYNC_CORE.
++ */
++ if (mm) {
++ membarrier_mm_sync_core_before_usermode(mm);
++ mmdrop(mm);
++ }
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++
++ /* Task is done with its stack. */
++ put_task_stack(prev);
++
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++{
++ /*
++ * New tasks start with FORK_PREEMPT_COUNT, see there and
++ * finish_task_switch() for details.
++ *
++ * finish_task_switch() will drop rq->lock and lower the preempt_count
++ * and the preempt_enable() will end up enabling preemption (on
++ * PREEMPT_COUNT kernels).
++ */
++
++ finish_task_switch(prev);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ /*
++ * If mm is non-NULL, we pass through switch_mm(). If mm is
++ * NULL, we will pass through mmdrop() in finish_task_switch().
++ * Both of these contain the full memory barrier required by
++ * membarrier after storing to rq->curr, before returning to
++ * user-space.
++ */
++ if (!mm) {
++ next->active_mm = oldmm;
++ mmgrab(oldmm);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm_irqs_off(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ prepare_lock_switch(rq, next);
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++ barrier();
++
++ finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++static unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptable section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++ struct rq *rq = cpu_rq(smp_processor_id());
++
++ return rq_load(rq) == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, due to under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake on a CPU other than
++ * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpufreq menu
++ * governor, are using nonsensical data: they boost the frequency of a CPU
++ * with IO-wait even though it might not end up running the task when it
++ * does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ struct rq *this = cpu_rq(cpu);
++ return atomic_read(&this->nr_iowait);
++}
++
++unsigned long nr_active(void)
++{
++ return nr_running() + nr_uninterruptible();
++}
++
++/*
++ * I/O wait is the number of running or queued tasks with their ->rq pointer
++ * set to this cpu as being the CPU they're more likely to run on.
++ */
++void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
++{
++ struct rq *rq = this_rq();
++
++ *nr_waiters = atomic_read(&rq->nr_iowait);
++ *load = rq_load(rq);
++}
++
++/* Variables and functions for calc_load */
++static unsigned long calc_load_update;
++unsigned long avenrun[3];
++EXPORT_SYMBOL(avenrun);
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
++
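++/*
++ * A minimal sketch of how a consumer decodes these fixed-point values
++ * (FIXED_1 is 1 << FSHIFT, i.e. 2048, so a stored value of 2048 means a
++ * load of 1.00), in the style of fs/proc/loadavg.c:
++ *
++ *	unsigned long avnrun[3];
++ *
++ *	get_avenrun(avnrun, FIXED_1/200, 0);
++ *	seq_printf(m, "%lu.%02lu", LOAD_INT(avnrun[0]),
++ *		   LOAD_FRAC(avnrun[0]));
++ *
++ * The FIXED_1/200 offset rounds to the nearest hundredth before
++ * LOAD_INT()/LOAD_FRAC() truncate the value for display.
++ */
++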
++static unsigned long
++calc_load(unsigned long load, unsigned long exp, unsigned long active)
++{
++ unsigned long newload;
++
++ newload = load * exp + active * (FIXED_1 - exp);
++ if (active >= load)
++ newload += FIXED_1-1;
++
++ return newload / FIXED_1;
++}
++
++/*
++ * calc_load - update the avenrun load estimates every LOAD_FREQ ticks (~5s).
++ */
++void calc_global_load(unsigned long ticks)
++{
++ long active;
++
++ if (time_before(jiffies, READ_ONCE(calc_load_update)))
++ return;
++ active = nr_active() * FIXED_1;
++
++ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
++ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
++ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
++
++ calc_load_update = jiffies + LOAD_FREQ;
++}
++
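++/*
++ * Worked example of the decay arithmetic, with FIXED_1 = 2048 and
++ * EXP_1 = 1884 (2048 * exp(-5s/1min)): starting from avenrun[0] = 0 with
++ * one runnable task (active = 2048),
++ *
++ *	newload = 0 * 1884 + 2048 * (2048 - 1884) = 335872
++ *	newload += 2047;	// round up, since active >= load
++ *	337919 / 2048 = 164	// 164/2048 ~= 0.08
++ *
++ * so after the first LOAD_FREQ interval the 1-minute average reads 0.08,
++ * i.e. 1 - e^(-5/60), and converges towards 1.00 while the task stays
++ * runnable.
++ */
++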
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++#ifdef CONFIG_PARAVIRT
++static inline u64 steal_ticks(u64 steal)
++{
++ if (unlikely(steal > NSEC_PER_SEC))
++ return div_u64(steal, TICK_NSEC);
++
++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
++}
++#endif
++
++#ifndef nsecs_to_cputime
++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
++#endif
++
++/*
++ * On each tick, add the number of nanoseconds to the unbanked variables and
++ * once one tick's worth has accumulated, account it allowing for accurate
++ * sub-tick accounting and totals.
++ */
++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ if (atomic_read(&rq->nr_iowait) > 0) {
++ rq->iowait_ns += ns;
++ if (rq->iowait_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->iowait_ns);
++ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_NSEC * ticks;
++ rq->iowait_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->idle_ns += ns;
++ if (rq->idle_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->idle_ns);
++ cpustat[CPUTIME_IDLE] += (__force u64)TICK_NSEC * ticks;
++ rq->idle_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(idle);
++}
++
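++/*
++ * Worked example of the banking above, assuming HZ=250 so JIFFY_NS is
++ * 4000000: if the idle task accumulates 3.1ms of idle time and then
++ * another 1.5ms, the first call banks it all (3100000 < JIFFY_NS) and the
++ * second sees 4600000ns, credits one full tick (TICK_NSEC) to
++ * CPUTIME_IDLE and carries the 600000ns remainder forward. Totals remain
++ * tick-granular, but nothing is lost between ticks.
++ */
++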
++static void pc_system_time(struct rq *rq, struct task_struct *p,
++ int hardirq_offset, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->stime_ns += ns;
++ if (p->stime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->stime_ns);
++ p->stime_ns %= JIFFY_NS;
++ p->stime += (__force u64)TICK_NSEC * ticks;
++ account_group_system_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (hardirq_count() - hardirq_offset) {
++ rq->irq_ns += ns;
++ if (rq->irq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->irq_ns);
++ cpustat[CPUTIME_IRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->irq_ns %= JIFFY_NS;
++ }
++ } else if (in_serving_softirq()) {
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->system_ns += ns;
++ if (rq->system_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->system_ns);
++ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_NSEC * ticks;
++ rq->system_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->utime_ns += ns;
++ if (p->utime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->utime_ns);
++ p->utime_ns %= JIFFY_NS;
++ p->utime += (__force u64)TICK_NSEC * ticks;
++ account_group_user_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (this_cpu_ksoftirqd() == p) {
++ /*
++ * ksoftirqd time do not get accounted in cpu_softirq_time.
++ * So, we have to handle it separately here.
++ */
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ }
++
++ if (task_nice(p) > 0 || idleprio_task(p)) {
++ rq->nice_ns += ns;
++ if (rq->nice_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->nice_ns);
++ cpustat[CPUTIME_NICE] += (__force u64)TICK_NSEC * ticks;
++ rq->nice_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->user_ns += ns;
++ if (rq->user_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->user_ns);
++ cpustat[CPUTIME_USER] += (__force u64)TICK_NSEC * ticks;
++ rq->user_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++/*
++ * This is called on clock ticks.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate tick timekeeping */
++ if (user_mode(get_irq_regs()))
++ pc_user_time(rq, p, account_ns);
++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
++ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
++ else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++
++ p->last_ran = rq->niffies;
++}
++
++/*
++ * This is called on context switches.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate subtick timekeeping */
++ if (p != idle)
++ pc_user_time(rq, p, account_ns);
++ else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++}
++
++/*
++ * Return any ns on the sched_clock that have not yet been accounted in
++ * @p in case that task is currently running.
++ *
++ * Called with task_rq_lock(p) held.
++ */
++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
++{
++ u64 ns = 0;
++
++ /*
++ * Must be ->curr _and_ ->on_rq. If dequeued, we would
++ * project cycles that may never be accounted to this
++ * thread, breaking clock_gettime().
++ */
++ if (p == rq->curr && task_on_rq_queued(p)) {
++ update_clocks(rq);
++ ns = rq->niffies - p->last_ran;
++ }
++
++ return ns;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * Return separately the current's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++ /*
++ * 64-bit doesn't need locks to atomically read a 64-bit value.
++ * So we have an optimization chance when the task's delta_exec is 0.
++ * Reading ->on_cpu is racy, but this is ok.
++ *
++ * If we race with it leaving CPU, we'll take a lock. So we're correct.
++ * If we race with it entering CPU, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return tsk_seruntime(p);
++#endif
++
++ rq = task_rq_lock(p, &flags);
++ ns = p->sched_time + do_task_delta_exec(p, rq);
++ task_rq_unlock(rq, p, &flags);
++
++ return ns;
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
++ * data is modified only by the local runqueue during scheduler_tick with
++ * interrupts disabled.
++ */
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
++ */
++static inline void iso_tick(struct rq *rq)
++{
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
++ rq->iso_ticks += 100;
++ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
++ rq->iso_refractory = true;
++ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
++ rq->iso_ticks = ISO_PERIOD * 100;
++ }
++}
++
++/* No SCHED_ISO task was running so decrease rq->iso_ticks */
++static inline void no_iso_tick(struct rq *rq, int ticks)
++{
++ if (rq->iso_ticks > 0 || rq->iso_refractory) {
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
++ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
++ rq->iso_refractory = false;
++ if (unlikely(rq->iso_ticks < 0))
++ rq->iso_ticks = 0;
++ }
++ }
++}
++
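++/*
++ * Worked example of the hysteresis, assuming ISO_PERIOD of 5*HZ ticks and
++ * the default sched_iso_cpu of 70 (both assumptions; they are defined
++ * elsewhere in this patch): iso_tick() adds 100 per tick with a
++ * (ISO_PERIOD - 1)/ISO_PERIOD decay, so sustained ISO/RT usage drives
++ * iso_ticks towards ISO_PERIOD * 100. The refractory flag is set once it
++ * exceeds ISO_PERIOD * 70 (70% cpu) and is only cleared again by
++ * no_iso_tick() below ISO_PERIOD * 70 * 115/128, roughly 63%, giving the
++ * ~10% hysteresis described above.
++ */
++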
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static void task_running_tick(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ /*
++ * If a SCHED_ISO task is running we increment the iso_ticks. In
++ * order to prevent SCHED_ISO tasks from causing starvation in the
++ * presence of true RT tasks we account those as iso_ticks as well.
++ */
++ if (rt_task(p) || task_running_iso(p))
++ iso_tick(rq);
++ else
++ no_iso_tick(rq, 1);
++
++ /* SCHED_FIFO tasks never run out of timeslice. */
++ if (p->policy == SCHED_FIFO)
++ return;
++
++ if (iso_task(p)) {
++ if (task_running_iso(p)) {
++ if (rq->iso_refractory) {
++ /*
++ * SCHED_ISO task is running as RT and limit
++ * has been hit. Force it to reschedule as
++ * SCHED_NORMAL by zeroing its time_slice
++ */
++ p->time_slice = 0;
++ }
++ } else if (!rq->iso_refractory) {
++ /* Can now run again ISO. Reschedule to pick up prio */
++ goto out_resched;
++ }
++ }
++
++ /*
++ * Tasks that were scheduled in the first half of a tick are not
++ * allowed to run into the 2nd half of the next tick if they will
++ * run out of time slice in the interim. Otherwise, if they have
++ * less than RESCHED_US μs of time slice left they will be rescheduled.
++ * Dither is used as a backup for when hrexpiry is disabled or high res
++ * timers not configured in.
++ */
++ if (p->time_slice - rq->dither >= RESCHED_US)
++ return;
++out_resched:
++ rq_lock(rq);
++ __set_tsk_resched(p);
++ rq_unlock(rq);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * We can stop the timer tick any time highres timers are active since
++ * we rely entirely on highres timeouts for task expiry rescheduling.
++ */
++static void sched_stop_tick(struct rq *rq, int cpu)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (!tick_nohz_full_enabled())
++ return;
++ if (!tick_nohz_full_cpu(cpu))
++ return;
++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++/**
++ * scheduler_tick_max_deferment
++ *
++ * Keep at least one tick per second when a single
++ * active task is running.
++ *
++ * This makes sure that uptime continues to move forward, even
++ * with a very low granularity.
++ *
++ * Return: Maximum deferment in nanoseconds.
++ */
++u64 scheduler_tick_max_deferment(void)
++{
++ struct rq *rq = this_rq();
++ unsigned long next, now = READ_ONCE(jiffies);
++
++ next = rq->last_jiffy + HZ;
++
++ if (time_before_eq(next, now))
++ return 0;
++
++ return jiffies_to_nsecs(next - now);
++}
++#else
++static inline void sched_stop_tick(struct rq *rq, int cpu)
++{
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++}
++#endif
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu __maybe_unused = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++
++ sched_clock_tick();
++ update_clocks(rq);
++ update_load_avg(rq, 0);
++ update_cpu_clock_tick(rq, rq->curr);
++ if (!rq_idle(rq))
++ task_running_tick(rq);
++ else if (rq->last_jiffy > rq->last_scheduler_tick)
++ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
++ rq->last_scheduler_tick = rq->last_jiffy;
++ rq->last_tick = rq->clock;
++ perf_event_task_tick();
++ sched_stop_tick(rq, cpu);
++}
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++ if (preempt_count() == val) {
++ unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ __preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ preempt_latency_stop(val);
++ __preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
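++/*
++ * The latency hooks above only fire on the outermost transition. With two
++ * nested sections:
++ *
++ *	preempt_disable();	// count 0 -> 1: preempt_latency_start()
++ *				// begins timing
++ *	preempt_disable();	// count 1 -> 2: no trace event
++ *	preempt_enable();	// count 2 -> 1: no trace event
++ *	preempt_enable();	// count 1 -> 0: preempt_latency_stop()
++ *				// ends the timed region
++ *
++ * so the recorded latency covers the whole outermost non-preemptible
++ * region rather than each nesting level.
++ */
++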
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ return p->preempt_disable_ip;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline. Make sure update_clocks has been called recently to update
++ * rq->niffies.
++ */
++static void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++ p->time_slice = timeslice();
++ p->deadline = rq->niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
++ if (!p->mm)
++ p->smt_bias = 0;
++ else if (rt_task(p))
++ p->smt_bias = 1 << 30;
++ else if (task_running_iso(p))
++ p->smt_bias = 1 << 29;
++ else if (idleprio_task(p)) {
++ if (task_running_idle(p))
++ p->smt_bias = 0;
++ else
++ p->smt_bias = 1;
++ } else if (--p->smt_bias < 1)
++ p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
++
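++/*
++ * A worked example of the refill, assuming the default rr_interval of 6ms
++ * and prio ratios that grow ~10% per nice level with nice 0 at a ratio of
++ * 128 (both set elsewhere in this patch): a nice 0 task gets
++ * deadline = niffies + ~6ms, while a nice 5 task (ratio ~206) gets
++ * deadline = niffies + ~9.7ms. Under contention the earlier deadline is
++ * always picked first, which is how nice levels translate into a
++ * proportional share of CPU.
++ */
++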
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound, so every time they're rescheduled they have their time_slice
++ * refilled, but get a new, later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++ if (p->time_slice < RESCHED_US || batch_task(p))
++ time_slice_expired(p, rq);
++}
++
++/*
++ * Task selection with skiplists is a simple matter of picking off the first
++ * task in the sorted list, an O(1) operation. The lookup across runqueues
++ * is amortised O(1), being bounded by the number of processors.
++ *
++ * Runqueues are selectively locked based on their unlocked data and then
++ * unlocked if not needed. At most 3 locks will be held at any time and are
++ * released as soon as they're no longer needed. All balancing between CPUs
++ * is thus done here in an extremely simple, first-come best-fit manner.
++ *
++ * This iterates over runqueues in cache locality order. In interactive mode
++ * it iterates over all CPUs and finds the task with the best key/deadline.
++ * In non-interactive mode it will only take a task if it's from the current
++ * runqueue or a runqueue with more tasks than the current one with a better
++ * key/deadline.
++ */
++#ifdef CONFIG_SMP
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct rq *locked = NULL, *chosen = NULL;
++ struct task_struct *edt = idle;
++ int i, best_entries = 0;
++ u64 best_key = ~0ULL;
++
++ for (i = 0; i < total_runqueues; i++) {
++ struct rq *other_rq = rq_order(rq, i);
++ skiplist_node *next;
++ int entries;
++
++ entries = other_rq->sl->entries;
++ /*
++ * Check for queued entries locklessly first. The local runqueue
++ * is locked so entries will always be accurate.
++ */
++ if (!sched_interactive) {
++ /*
++ * Don't rebalance across nodes unless the CPU
++ * is idle.
++ */
++ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
++ break;
++ if (entries <= best_entries)
++ continue;
++ } else if (!entries)
++ continue;
++
++ /* if (i) implies other_rq != rq */
++ if (i) {
++ /* Check the lockless best_key hint first */
++ if (other_rq->best_key >= best_key)
++ continue;
++
++ if (unlikely(!trylock_rq(rq, other_rq)))
++ continue;
++
++ /* Need to reevaluate entries after locking */
++ entries = other_rq->sl->entries;
++ if (unlikely(!entries)) {
++ unlock_rq(other_rq);
++ continue;
++ }
++ }
++
++ next = other_rq->node;
++ /*
++ * In interactive mode we check beyond the best entry on other
++ * runqueues if we can't get the best for smt or affinity
++ * reasons.
++ */
++ while ((next = next->next[0]) != other_rq->node) {
++ struct task_struct *p;
++ u64 key = next->key;
++
++ /* Reevaluate key after locking */
++ if (key >= best_key)
++ break;
++
++ p = next->value;
++ if (!smt_schedule(p, rq)) {
++ if (i && !sched_interactive)
++ break;
++ continue;
++ }
++
++ if (sched_other_cpu(p, cpu)) {
++ if (sched_interactive || !i)
++ continue;
++ break;
++ }
++ /* Make sure affinity is ok */
++ if (i) {
++ /* From this point on p is the best so far */
++ if (locked)
++ unlock_rq(locked);
++ chosen = locked = other_rq;
++ }
++ best_entries = entries;
++ best_key = key;
++ edt = p;
++ break;
++ }
++ /*
++ * rq->preempting is only a hint, as the state may have changed
++ * since it was set with the resched call, but if we have met
++ * the condition we can break out here.
++ */
++ if (edt == rq->preempting)
++ break;
++ if (i && other_rq != chosen)
++ unlock_rq(other_rq);
++ }
++
++ if (likely(edt != idle))
++ take_task(rq, cpu, edt);
++
++ if (locked)
++ unlock_rq(locked);
++
++ rq->preempting = NULL;
++
++ return edt;
++}
++#else /* CONFIG_SMP */
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct task_struct *edt;
++
++ if (unlikely(!rq->sl->entries))
++ return idle;
++ edt = rq->node->next[0]->value;
++ take_task(rq, cpu, edt);
++ return edt;
++}
++#endif /* CONFIG_SMP */
++
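++/*
++ * A short walk-through of the SMP selection above: with the local rq
++ * holding one task whose best key is 12 and a remote rq advertising
++ * best_key 7, the remote queue is trylocked, its entries and key are
++ * re-read under the lock, and if the key still beats 12 that task becomes
++ * edt and the remote rq stays locked as `chosen' until a better candidate
++ * displaces it. A remote rq whose advertised best_key is already >= the
++ * current best is skipped without ever taking its lock, which is what
++ * keeps the common case cheap.
++ */
++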
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ /* Save this before calling printk(), since that will clobber it */
++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++ if (oops_in_progress)
++ return;
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++ if (task_stack_end_corrupted(prev))
++ panic("corrupted stack end detected inside scheduler\n");
++#endif
++
++ if (unlikely(in_atomic_preempt_off())) {
++ __schedule_bug(prev);
++ preempt_count_set(PREEMPT_DISABLED);
++ }
++ rcu_sleep_check();
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * The currently running task's information is all stored in rq local data
++ * which is only modified by the local CPU.
++ */
++static inline void set_rq_task(struct rq *rq, struct task_struct *p)
++{
++ if (p == rq->idle || p->policy == SCHED_FIFO)
++ hrexpiry_clear(rq);
++ else
++ hrexpiry_start(rq, US_TO_NS(p->time_slice));
++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
++ rq->dither = 0;
++ else
++ rq->dither = rq_dither(rq);
++
++ rq->rq_deadline = p->deadline;
++ rq->rq_prio = p->prio;
++#ifdef CONFIG_SMT_NICE
++ rq->rq_mm = p->mm;
++ rq->rq_smt_bias = p->smt_bias;
++#endif
++}
++
++#ifdef CONFIG_SMT_NICE
++static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
++static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
++
++/*
++ * Iterate over SMT siblings when we've scheduled a process on this CPU and
++ * decide whether they should continue running or be descheduled.
++ */
++static void check_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct task_struct *p;
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ continue;
++ p = rq->curr;
++ if (!smt_schedule(p, this_rq))
++ resched_curr(rq);
++ }
++}
++
++static void wake_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ resched_idle(rq);
++ }
++}
++#else
++static void check_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_siblings(struct rq __maybe_unused *this_rq) {}
++#endif
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ * paths. For example, see arch/x86/entry_64.S.
++ *
++ * To drive preemption between tasks, the scheduler sets the flag in timer
++ * interrupt handler scheduler_tick().
++ *
++ * 3. Wakeups don't really cause entry into schedule(). They add a
++ * task to the run-queue and that's it.
++ *
++ * Now, if the new task added to the run-queue preempts the current
++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ * called on the nearest possible occasion:
++ *
++ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ * - in syscall or exception context, at the next outermost
++ * preempt_enable(). (this might be as soon as the wake_up()'s
++ * spin_unlock()!)
++ *
++ * - in IRQ context, return from interrupt-handler to
++ * preemptible context
++ *
++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ * then at the next:
++ *
++ * - cond_resched() call
++ * - explicit schedule() call
++ * - return from syscall or exception to user-space
++ * - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++ struct task_struct *prev, *next, *idle;
++ unsigned long *switch_count;
++ bool deactivate = false;
++ struct rq *rq;
++ u64 niffies;
++ int cpu;
++
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ prev = rq->curr;
++ idle = rq->idle;
++
++ schedule_debug(prev);
++
++ local_irq_disable();
++ rcu_note_context_switch(preempt);
++
++ /*
++ * Make sure that signal_pending_state()->signal_pending() below
++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++ * done by the caller to avoid the race with signal_wake_up().
++ *
++ * The membarrier system call requires a full memory barrier
++ * after coming from user-space, before storing to rq->curr.
++ */
++ rq_lock(rq);
++ smp_mb__after_spinlock();
++#ifdef CONFIG_SMP
++ if (rq->preempt) {
++ /*
++ * Make sure resched_curr hasn't triggered a preemption
++ * locklessly on a task that has since scheduled away. Spurious
++ * wakeup of idle is okay though.
++ */
++ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
++ rq->preempt = NULL;
++ clear_preempt_need_resched();
++ rq_unlock_irq(rq);
++ return;
++ }
++ rq->preempt = NULL;
++ }
++#endif
++
++ switch_count = &prev->nivcsw;
++ if (!preempt && prev->state) {
++ if (unlikely(signal_pending_state(prev->state, prev))) {
++ prev->state = TASK_RUNNING;
++ } else {
++ deactivate = true;
++ prev->on_rq = 0;
++
++ if (prev->in_iowait) {
++ atomic_inc(&rq->nr_iowait);
++ delayacct_blkio_start();
++ }
++
++ /*
++ * If a worker is going to sleep, notify and
++ * ask workqueue whether it wants to wake up a
++ * task to maintain concurrency. If so, wake
++ * up the task.
++ */
++ if (prev->flags & PF_WQ_WORKER) {
++ struct task_struct *to_wakeup;
++
++ to_wakeup = wq_worker_sleeping(prev);
++ if (to_wakeup)
++ try_to_wake_up_local(to_wakeup);
++ }
++ }
++ switch_count = &prev->nvcsw;
++ }
++
++ /*
++ * Store the niffy value here for use by the next task's last_ran
++ * below to avoid losing niffies due to update_clocks being called
++ * again after this point.
++ */
++ update_clocks(rq);
++ niffies = rq->niffies;
++ update_cpu_clock_switch(rq, prev);
++
++ clear_tsk_need_resched(prev);
++ clear_preempt_need_resched();
++
++ if (idle != prev) {
++ check_deadline(prev, rq);
++ return_task(prev, rq, cpu, deactivate);
++ }
++
++ next = earliest_deadline_task(rq, cpu, idle);
++ if (likely(next->prio != PRIO_LIMIT))
++ clear_cpuidle_map(cpu);
++ else {
++ set_cpuidle_map(cpu);
++ update_load_avg(rq, 0);
++ }
++
++ set_rq_task(rq, next);
++ next->last_ran = niffies;
++
++ if (likely(prev != next)) {
++ /*
++ * Don't reschedule an idle task or deactivated tasks
++ */
++ if (prev == idle)
++ rq->nr_running++;
++ else if (!deactivate)
++ resched_suitable_idle(prev);
++ if (unlikely(next == idle)) {
++ rq->nr_running--;
++ wake_siblings(rq);
++ } else
++ check_siblings(rq);
++ rq->nr_switches++;
++ rq->curr = next;
++ /*
++ * The membarrier system call requires each architecture
++ * to have a full memory barrier after updating
++ * rq->curr, before returning to user-space.
++ *
++ * Here are the schemes providing that barrier on the
++ * various architectures:
++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++ * - finish_lock_switch() for weakly-ordered
++ * architectures where spin_unlock is a full barrier,
++ * - switch_to() for arm64 (weakly-ordered, spin_unlock
++ * is a RELEASE barrier),
++ */
++ ++*switch_count;
++
++ trace_sched_switch(preempt, prev, next);
++ context_switch(rq, prev, next); /* unlocks the rq */
++ } else {
++ check_siblings(rq);
++ rq_unlock(rq);
++ do_pending_softirq(rq, next);
++ local_irq_enable();
++ }
++}
++
++void __noreturn do_task_dead(void)
++{
++ /*
++ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
++ * when the following two conditions become true:
++ * - there is a race condition on mmap_sem (it is acquired by
++ * exit_mm()), and
++ * - an SMI occurs before setting TASK_RUNNING
++ * (or the hypervisor of a virtual machine switches to another guest).
++ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
++ *
++ * To avoid this, we have to wait for tsk->pi_lock, held by
++ * try_to_wake_up(), to be released.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ /* Causes final put_task_struct in finish_task_switch(). */
++ __set_current_state(TASK_DEAD);
++
++ /* Tell freezer to ignore us: */
++ current->flags |= PF_NOFREEZE;
++ __schedule(false);
++ BUG();
++
++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++ for (;;)
++ cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
++ preempt_count() ||
++ signal_pending_state(tsk->state, tsk))
++ return;
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ preempt_disable();
++ __schedule(false);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++ /*
++ * As this skips calling sched_submit_work(), which the idle task does
++ * regardless because that function is a nop when the task is in a
++ * TASK_RUNNING state, make sure this isn't used someplace that the
++ * current task can be in any other state. Note, idle is always in the
++ * TASK_RUNNING state.
++ */
++ WARN_ON_ONCE(current->state);
++ do {
++ __schedule(false);
++ } while (need_resched());
++}
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != IN_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ sched_preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ __schedule(true);
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ } while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemptions off the return-from-interrupt
++ * path occur separately and call schedule() directly.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(!preemptible()))
++ return;
++
++ preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++ enum ctx_state prev_ctx;
++
++ if (likely(!preemptible()))
++ return;
++
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ /*
++ * Needs preempt disabled in case user_exit() is traced
++ * and the tracer calls preempt_enable_notrace() causing
++ * an infinite recursion.
++ */
++ prev_ctx = exception_enter();
++ __schedule(true);
++ exception_exit(prev_ctx);
++
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This
++ * protects us against recursive calls from irq context.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ preempt_disable();
++ local_irq_enable();
++ __schedule(true);
++ local_irq_disable();
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio, oldprio;
++ struct rq *rq;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_rq_lock(p);
++ update_rq_clock(rq);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is a lot of trickiness in making this pointer cache
++ * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together
++ * to ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a no-no in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ oldprio = p->prio;
++ p->prio = prio;
++ if (task_running(rq, p)) {
++ if (prio > oldprio)
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (prio < oldprio)
++ try_preempt(p, rq);
++ }
++out_unlock:
++ __task_rq_unlock(rq);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++/*
++ * Adjust the deadline for when the priority is to change, before it's
++ * changed.
++ */
++static inline void adjust_deadline(struct task_struct *p, int new_prio)
++{
++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
++}
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static, old_static;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling while the task is
++ * not SCHED_NORMAL/SCHED_BATCH:
++ */
++ if (has_rt_policy(p)) {
++ p->static_prio = new_static;
++ goto out_unlock;
++ }
++
++ adjust_deadline(p, new_static);
++ old_static = p->static_prio;
++ p->static_prio = new_static;
++ p->prio = effective_prio(p);
++
++ if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (new_static < old_static)
++ try_preempt(p, rq);
++ } else if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ if (old_static < new_static)
++ resched_task(p);
++ }
++out_unlock:
++ task_rq_unlock(rq, p, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* Convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
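++/*
++ * Worked example: nice_to_rlimit() maps nice 19 to 1 and nice -20 to 40
++ * (i.e. rlimit style = 20 - nice), so a task with RLIMIT_NICE of 25 may
++ * lower its nice value to -5 (20 - (-5) = 25) but no further without
++ * CAP_SYS_NICE.
++ */
++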
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
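++
++/*
++ * Userspace counterpart of the syscall above, as a sketch using the
++ * glibc nice(3) wrapper (errno must be checked because -1 is also a
++ * valid return value):
++ *
++ *	#include <errno.h>
++ *	#include <unistd.h>
++ *
++ *	errno = 0;
++ *	if (nice(5) == -1 && errno != 0)
++ *		perror("nice");
++ *
++ * A negative increment (raising priority) fails with EPERM unless
++ * can_nice() above passes via RLIMIT_NICE or CAP_SYS_NICE.
++ */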
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
++ */
++int task_prio(const struct task_struct *p)
++{
++ int delta, prio = p->prio - MAX_RT_PRIO;
++
++ /* rt tasks and iso tasks */
++ if (prio <= 0)
++ goto out;
++
++ /* Convert to ms to avoid overflows */
++ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
++ if (unlikely(delta < 0))
++ delta = 0;
++ delta = delta * 40 / ms_longest_deadline_diff();
++ if (delta <= 80)
++ prio += delta;
++ if (idleprio_task(p))
++ prio += 40;
++out:
++ return prio;
++}
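++
++/*
++ * Worked example (illustrative): a SCHED_NORMAL task whose deadline lies
++ * half of ms_longest_deadline_diff() into the future gets delta = 20 and
++ * so reports prio + 20; an idleprio task gains a further +40. This is
++ * where the 0..82 spread described above comes from.
++ */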
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the CPU @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
++ int prio, bool keep_boost)
++{
++ int oldrtprio, oldprio;
++
++ p->policy = policy;
++ oldrtprio = p->rt_priority;
++ p->rt_priority = prio;
++ p->normal_prio = normal_prio(p);
++ oldprio = p->prio;
++ /*
++ * Keep a potential priority boosting if called from
++ * sched_setscheduler().
++ */
++ p->prio = normal_prio(p);
++ if (keep_boost)
++ p->prio = rt_effective_prio(p, p->prio);
++
++ if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (p->prio < oldprio || p->rt_priority > oldrtprio)
++ try_preempt(p, rq);
++ }
++}
++
++/*
++ * Check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++ const struct sched_attr *attr,
++ bool user, bool pi)
++{
++ int retval, policy = attr->sched_policy, oldpolicy = -1, priority = attr->sched_priority;
++ unsigned long flags, rlim_rtprio = 0;
++ int reset_on_fork;
++ struct rq *rq;
++
++ /* The pi code expects interrupts enabled */
++ BUG_ON(pi && in_interrupt());
++
++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
++ unsigned long lflags;
++
++ if (!lock_task_sighand(p, &lflags))
++ return -ESRCH;
++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++ unlock_task_sighand(p, &lflags);
++ if (rlim_rtprio)
++ goto recheck;
++ /*
++ * If the caller requested an RT policy without having the
++ * necessary rights, we downgrade the policy to SCHED_ISO.
++ * We also set the parameter to zero to pass the checks.
++ */
++ policy = SCHED_ISO;
++ priority = 0;
++ }
++recheck:
++ /* Double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
++ policy &= ~SCHED_RESET_ON_FORK;
++
++ if (!SCHED_RANGE(policy))
++ return -EINVAL;
++ }
++
++ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
++ return -EINVAL;
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH is 0.
++ */
++ if (priority < 0 ||
++ (p->mm && priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if (is_rt_policy(policy) != (priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (is_rt_policy(policy)) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (priority > p->rt_priority &&
++ priority > rlim_rtprio)
++ return -EPERM;
++ } else {
++ switch (p->policy) {
++ /*
++ * Can only downgrade policies but not back to
++ * SCHED_NORMAL
++ */
++ case SCHED_ISO:
++ if (policy == SCHED_ISO)
++ goto out;
++ if (policy != SCHED_NORMAL)
++ return -EPERM;
++ break;
++ case SCHED_BATCH:
++ if (policy == SCHED_BATCH)
++ goto out;
++ if (policy != SCHED_IDLEPRIO)
++ return -EPERM;
++ break;
++ case SCHED_IDLEPRIO:
++ if (policy == SCHED_IDLEPRIO)
++ goto out;
++ return -EPERM;
++ default:
++ break;
++ }
++ }
++
++ /* Can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag: */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * Make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ *
++ * To be able to change p->policy safely, the runqueue lock must be
++ * held.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * Changing the policy of the stop threads is a very bad idea:
++ */
++ if (p == rq->stop) {
++ task_rq_unlock(rq, p, &flags);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further:
++ */
++ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++ priority == p->rt_priority))) {
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++
++ /* Re-check policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ task_rq_unlock(rq, p, &flags);
++ goto recheck;
++ }
++ p->sched_reset_on_fork = reset_on_fork;
++
++ __setscheduler(p, rq, policy, priority, pi);
++ task_rq_unlock(rq, p, &flags);
++
++ if (pi)
++ rt_mutex_adjust_pi(p);
++out:
++ return 0;
++}
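++
++/*
++ * Note the MuQSS-specific behaviour at the top of this function: a caller
++ * without CAP_SYS_NICE requesting SCHED_FIFO/SCHED_RR with an RLIMIT_RTPRIO
++ * of zero is not rejected with -EPERM as in mainline, but silently
++ * downgraded to SCHED_ISO with priority 0. For example:
++ *
++ *	struct sched_param sp = { .sched_priority = 50 };
++ *
++ *	sched_setscheduler(p, SCHED_FIFO, &sp);
++ *
++ * leaves an unprivileged @p running as SCHED_ISO rather than failing.
++ */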
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool check)
++{
++ struct sched_attr attr = {
++ .sched_policy = policy,
++ .sched_priority = param->sched_priority,
++ .sched_nice = PRIO_TO_NICE(p->static_prio),
++ };
++
++ return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, false, true);
++}
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, false);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
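++
++/*
++ * Typical in-kernel use of the nocheck variant, as a minimal sketch (the
++ * kthread pointer "worker" is hypothetical):
++ *
++ *	struct sched_param param = { .sched_priority = 1 };
++ *
++ *	sched_setscheduler_nocheck(worker, SCHED_FIFO, &param);
++ *
++ * With check false, __sched_setscheduler() skips the rlimit, ownership
++ * and security_task_setscheduler() tests, which is what the
++ * stop_machine() case described above requires.
++ */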
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /* Zero the full structure, so that a short copy will be nice: */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ /* Bail out on silly large: */
++ if (size > PAGE_SIZE)
++ goto err_size;
++
++ /* ABI compatibility quirk: */
++ if (!size)
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: Do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
++ */
++ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
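++
++/*
++ * The copy protocol above mirrors perf_copy_attr(): newer userspace may
++ * pass a structure larger than the kernel's sched_attr provided every
++ * byte beyond sizeof(struct sched_attr) is zero. Otherwise -E2BIG is
++ * returned and uattr->size is rewritten with the size the kernel
++ * understands, so the caller can retry with a truncated structure.
++ */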
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
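++
++/*
++ * glibc provides no wrapper for this syscall; a userspace sketch
++ * (assuming struct sched_attr from <linux/sched/types.h>):
++ *
++ *	struct sched_attr attr = {
++ *		.size		= sizeof(attr),
++ *		.sched_policy	= SCHED_BATCH,
++ *		.sched_nice	= 10,
++ *	};
++ *
++ *	if (syscall(SYS_sched_setattr, getpid(), &attr, 0))
++ *		perror("sched_setattr");
++ */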
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ rcu_read_unlock();
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (rt_task(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_unlock;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++ retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ cpumask_copy(new_mask, cpus_allowed);
++ goto again;
++ }
++ }
++out_unlock:
++ free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++ free_cpumask_var(cpus_allowed);
++out_put_task:
++ put_task_struct(p);
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ cpumask_t *new_mask)
++{
++ if (len < cpumask_size())
++ cpumask_clear(new_mask);
++ else if (len > cpumask_size())
++ len = cpumask_size();
++
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_var_t new_mask;
++ int retval;
++
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++ if (retval == 0)
++ retval = sched_setaffinity(pid, new_mask);
++ free_cpumask_var(new_mask);
++ return retval;
++}
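++
++/*
++ * Userspace counterpart, as a sketch using the glibc wrapper (pid 0
++ * means the calling thread):
++ *
++ *	cpu_set_t set;
++ *
++ *	CPU_ZERO(&set);
++ *	CPU_SET(2, &set);
++ *	if (sched_setaffinity(0, sizeof(set), &set))
++ *		perror("sched_setaffinity");
++ *
++ * The requested mask is AND-ed with the task's cpuset above before
++ * __set_cpus_allowed_ptr() is attempted.
++ */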
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ unsigned long flags;
++ int retval;
++
++ get_online_cpus();
++ rcu_read_lock();
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++out_unlock:
++ rcu_read_unlock();
++ put_online_cpus();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An error
++ * code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_var_t mask;
++
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
++ return -EINVAL;
++
++ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sched_getaffinity(pid, mask);
++ if (ret == 0) {
++ unsigned int retlen = min(len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
++ ret = -EFAULT;
++ else
++ ret = retlen;
++ }
++ free_cpumask_var(mask);
++
++ return ret;
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. It does this by
++ * scheduling away the current task. If it still has the earliest deadline
++ * it will be scheduled again as the next task.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++ struct rq *rq;
++
++ if (!sched_yield_type)
++ goto out;
++
++ local_irq_disable();
++ rq = this_rq();
++ rq_lock(rq);
++
++ if (sched_yield_type > 1)
++ time_slice_expired(current, rq);
++ schedstat_inc(rq->yld_count);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ preempt_disable();
++ rq_unlock(rq);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++out:
++ return 0;
++}
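++
++/*
++ * sched_yield_type is a MuQSS tunable defined elsewhere in this patch:
++ * 0 turns sched_yield() into a no-op, and values above 1 additionally
++ * expire the caller's timeslice (and hence its deadline) before
++ * rescheduling, pushing it behind its peers.
++ */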
++
++#ifndef CONFIG_PREEMPT
++int __sched _cond_resched(void)
++{
++ if (should_resched(0)) {
++ preempt_schedule_common();
++ return 1;
++ }
++ rcu_all_qs();
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __sched __cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
++ local_bh_enable();
++ preempt_schedule_common();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(__cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
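++
++/*
++ * The wait_event() alternative recommended above, as a sketch ("wq" and
++ * "event" are hypothetical):
++ *
++ *	wait_event(wq, event);
++ *
++ * paired with a wake_up(&wq) in the code that makes "event" true; the
++ * waiter sleeps instead of burning CPU in a yield() loop.
++ */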
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ struct task_struct *rq_p;
++ struct rq *rq, *p_rq;
++ unsigned long flags;
++ int yielded = 0;
++
++ local_irq_save(flags);
++ rq = this_rq();
++
++again:
++ p_rq = task_rq(p);
++ /*
++ * If the target task is already running, or is not runnable at
++ * all, there is no task to yield to.
++ */
++ if (task_running(p_rq, p) || p->state) {
++ yielded = -ESRCH;
++ goto out_irq;
++ }
++
++ double_rq_lock(rq, p_rq);
++ if (unlikely(task_rq(p) != p_rq)) {
++ double_rq_unlock(rq, p_rq);
++ goto again;
++ }
++
++ yielded = 1;
++ schedstat_inc(rq->yld_count);
++ rq_p = rq->curr;
++ if (p->deadline > rq_p->deadline)
++ p->deadline = rq_p->deadline;
++ p->time_slice += rq_p->time_slice;
++ if (p->time_slice > timeslice())
++ p->time_slice = timeslice();
++ time_slice_expired(rq_p, rq);
++ if (preempt && rq != p_rq)
++ resched_task(p_rq->curr);
++ double_rq_unlock(rq, p_rq);
++out_irq:
++ local_irq_restore(flags);
++
++ if (yielded > 0)
++ schedule();
++ return yielded;
++}
++EXPORT_SYMBOL_GPL(yield_to);
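++
++/*
++ * Under MuQSS, yield_to() boosts @p by donating the yielder's runtime
++ * accounting: @p inherits the earlier of the two deadlines, absorbs the
++ * current task's remaining time_slice (capped at one full timeslice()),
++ * and the yielder's own slice is expired so it requeues behind @p.
++ */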
++
++int io_schedule_prepare(void)
++{
++ int old_iowait = current->in_iowait;
++
++ current->in_iowait = 1;
++ blk_schedule_flush_plug(current);
++
++ return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++ current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++ int token;
++ long ret;
++
++ token = io_schedule_prepare();
++ ret = schedule_timeout(timeout);
++ io_schedule_finish(token);
++
++ return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void io_schedule(void)
++{
++ int token;
++
++ token = io_schedule_prepare();
++ schedule();
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
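++
++/*
++ * The prepare/finish pair lets callers wrap an arbitrary blocking region
++ * in iowait accounting, e.g. (a sketch; "done" is a hypothetical
++ * completion):
++ *
++ *	int tok = io_schedule_prepare();
++ *
++ *	wait_for_completion(&done);
++ *	io_schedule_finish(tok);
++ *
++ * io_schedule() and io_schedule_timeout() above are exactly this pattern
++ * around schedule() and schedule_timeout().
++ */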
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
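++
++/*
++ * Taken together, the two syscalls above report these static priority
++ * ranges:
++ *
++ *	SCHED_FIFO / SCHED_RR:			1 .. MAX_USER_RT_PRIO-1
++ *	SCHED_NORMAL / SCHED_BATCH /
++ *	SCHED_ISO / SCHED_IDLEPRIO:		0 .. 0
++ *
++ * i.e. only the realtime policies carry a meaningful rt_priority.
++ */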
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++ struct task_struct *p;
++ unsigned int time_slice;
++ unsigned long flags;
++ struct rq *rq;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ rq = task_rq_lock(p, &flags);
++ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
++ task_rq_unlock(rq, p, &flags);
++
++ rcu_read_unlock();
++ *t = ns_to_timespec64(time_slice);
++ return 0;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * this syscall writes the default timeslice value of a given process
++ * into the user-space timespec buffer. A value of '0' means infinity.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct timespec __user *, interval)
++{
++ struct timespec64 t;
++ int retval = sched_rr_get_interval(pid, &t);
++
++ if (retval == 0)
++ retval = put_timespec64(&t, interval);
++
++ return retval;
++}
++
++#ifdef CONFIG_COMPAT
++COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
++ compat_pid_t, pid,
++ struct compat_timespec __user *, interval)
++{
++ struct timespec64 t;
++ int retval = sched_rr_get_interval(pid, &t);
++
++ if (retval == 0)
++ retval = compat_put_timespec64(&t, interval);
++ return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ int ppid;
++
++ if (!try_get_task_stack(p))
++ return;
++
++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
++
++ if (p->state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ free = stack_not_used(p);
++#endif
++ ppid = 0;
++ rcu_read_lock();
++ if (pid_alive(p))
++ ppid = task_pid_nr(rcu_dereference(p->real_parent));
++ rcu_read_unlock();
++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
++ task_pid_nr(p), ppid,
++ (unsigned long)task_thread_info(p)->flags);
++
++ print_worker_info(KERN_INFO, p);
++ show_stack(p, NULL);
++ put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++ /* no filter, everything matches */
++ if (!state_filter)
++ return true;
++
++ /* filter, but doesn't match */
++ if (!(p->state & state_filter))
++ return false;
++
++ /*
++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++ * TASK_KILLABLE).
++ */
++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++ return false;
++
++ return true;
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ "  task                PC stack   pid father\n");
++#else
++ printk(KERN_INFO
++ "  task                        PC stack   pid father\n");
++#endif
++ rcu_read_lock();
++ for_each_process_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all files on a slow
++ * console might take a lot of time:
++ * Also, reset softlockup watchdogs on all CPUs, because
++ * another CPU might be blocked waiting for us to process
++ * an IPI.
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++#ifdef CONFIG_SMP
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++
++ if (task_queued(p)) {
++ /*
++ * Because __kthread_bind() calls this on blocked tasks without
++ * holding rq->lock.
++ */
++ lockdep_assert_held(rq->lock);
++ }
++}
++
++/*
++ * do_set_cpus_allowed() is the entry point for code outside the scheduler
++ * core. It must not be called on a running or queued task, and the caller
++ * should be holding pi_lock.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask);
++ if (needs_other_cpu(p, task_cpu(p))) {
++ struct rq *rq;
++
++ rq = __task_rq_lock(p);
++ set_task_cpu(p, valid_task_cpu(p));
++ resched_task(p);
++ __task_rq_unlock(rq);
++ }
++}
++#endif
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(rq->lock);
++ idle->last_ran = rq->niffies;
++ time_slice_expired(idle, rq);
++ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#ifdef CONFIG_SMT_NICE
++ idle->smt_bias = 0;
++#endif
++#endif
++ set_rq_task(rq, idle);
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++ raw_spin_unlock(rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary. Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rq_lock_irqsave(rq, &flags);
++ if (cpu_online(cpu) || cpu == smp_processor_id())
++ resched_curr(rq);
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct sched_domain *sd;
++
++ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (cpu == i)
++ continue;
++
++ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++
++ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (cpu_is_offline(cpu))
++ return true; /* Don't try to wake offline CPUs. */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * Wake up the specified CPU. If the CPU is going offline, it is the
++ * caller's responsibility to deal with the lost wakeup, for example,
++ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
++ */
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ bool queued = false, running_wrong = false, kthread;
++ struct cpumask old_mask;
++ unsigned long flags;
++ struct rq *rq;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ kthread = !!(p->flags & PF_KTHREAD);
++ if (kthread) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++
++ /*
++ * Must re-check here, to close a race against __kthread_bind(),
++ * sched_setaffinity() is not guaranteed to observe the flag.
++ */
++ if (check && (p->flags & PF_NO_SETAFFINITY)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ cpumask_copy(&old_mask, &p->cpus_allowed);
++ if (cpumask_equal(&old_mask, new_mask))
++ goto out;
++
++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ queued = task_queued(p);
++ __do_set_cpus_allowed(p, new_mask);
++
++ if (kthread) {
++ /*
++ * For kernel threads that do indeed end up on online &&
++ * !active we want to ensure they are strict per-CPU threads.
++ */
++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
++ !cpumask_intersects(new_mask, cpu_active_mask) &&
++ p->nr_cpus_allowed != 1);
++ }
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpumask_test_cpu(task_cpu(p), new_mask))
++ goto out;
++
++ if (task_running(rq, p)) {
++ /* Task is running on the wrong cpu now, reschedule it. */
++ if (rq == this_rq()) {
++ set_tsk_need_resched(p);
++ running_wrong = true;
++ } else
++ resched_task(p);
++ } else {
++ int cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++
++ if (queued) {
++ /*
++ * Switch runqueue locks after dequeueing the task
++ * here while still holding the pi_lock to be holding
++ * the correct lock for enqueueing.
++ */
++ dequeue_task(rq, p, 0);
++ rq_unlock(rq);
++
++ rq = cpu_rq(cpu);
++ rq_lock(rq);
++ }
++ set_task_cpu(p, cpu);
++ if (queued)
++ enqueue_task(rq, p, 0);
++ }
++ if (queued)
++ try_preempt(p, rq);
++ if (running_wrong)
++ preempt_disable();
++out:
++ task_rq_unlock(rq, p, &flags);
++
++ if (running_wrong) {
++ __schedule(true);
++ preempt_enable();
++ }
++
++ return ret;
++}
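++
++/*
++ * Note the running_wrong dance above: when the task being migrated is
++ * current on this runqueue it cannot be rescheduled while its rq lock is
++ * still held, so it is flagged with set_tsk_need_resched(), preemption
++ * is disabled across the unlock, and only then is __schedule(true)
++ * called.
++ */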
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++ return __set_cpus_allowed_ptr(p, new_mask, false);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Run through task list and find tasks affined to the dead cpu, then remove
++ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold
++ * cpu 0 and src_cpu's runqueue locks.
++ */
++static void bind_zero(int src_cpu)
++{
++ struct task_struct *p, *t;
++ struct rq *rq0;
++ int bound = 0;
++
++ if (src_cpu == 0)
++ return;
++
++ rq0 = cpu_rq(0);
++
++ do_each_thread(t, p) {
++ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) {
++ bool local = (task_cpu(p) == src_cpu);
++ struct rq *rq = task_rq(p);
++
++ /* The task still running on src_cpu is the cpu stopper thread */
++ if (local && task_running(rq, p))
++ continue;
++ atomic_clear_cpu(src_cpu, &p->cpus_allowed);
++ atomic_set_cpu(0, &p->cpus_allowed);
++ p->zerobound = true;
++ bound++;
++ if (local) {
++ bool queued = task_queued(p);
++
++ if (queued)
++ dequeue_task(rq, p, 0);
++ set_task_cpu(p, 0);
++ if (queued)
++ enqueue_task(rq0, p, 0);
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (bound) {
++ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
++ bound, src_cpu);
++ }
++}
++
++/*
++ * Find processes with the zerobound flag and re-enable their affinity for
++ * the CPU coming alive.
++ */
++static void unbind_zero(int src_cpu)
++{
++ int unbound = 0, zerobound = 0;
++ struct task_struct *p, *t;
++
++ if (src_cpu == 0)
++ return;
++
++ do_each_thread(t, p) {
++ if (!p->mm)
++ p->zerobound = false;
++ if (p->zerobound) {
++ unbound++;
++ cpumask_set_cpu(src_cpu, &p->cpus_allowed);
++ /*
++ * Once every CPU affinity has been re-enabled,
++ * remove the zerobound flag.
++ */
++ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) {
++ p->zerobound = false;
++ zerobound++;
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (unbound) {
++ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
++ unbound, src_cpu);
++ }
++ if (zerobound) {
++ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
++ zerobound);
++ }
++}
++
++/*
++ * Ensure that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm) {
++ switch_mm(mm, &init_mm, current);
++ finish_arch_post_lock_switch();
++ }
++ mmdrop(mm);
++}
++#else /* CONFIG_HOTPLUG_CPU */
++static void unbind_zero(int src_cpu) {}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++ struct sched_param start_param = { .sched_priority = 0 };
++ struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++ if (stop) {
++ /*
++ * Make it appear like a SCHED_FIFO task, it's something
++ * userspace knows about and won't get confused about.
++ *
++ * Also, it will make PI more or less work without too
++ * much confusion -- but then, stop work should not
++ * rely on PI working anyway.
++ */
++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++ }
++
++ cpu_rq(cpu)->stop = stop;
++
++ if (old_stop) {
++ /*
++ * Reset it back to a normal scheduling policy so that
++ * it can die in pieces.
++ */
++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++ }
++}
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++
++static struct ctl_table sd_ctl_dir[] = {
++ {
++ .procname = "sched_domain",
++ .mode = 0555,
++ },
++ {}
++};
++
++static struct ctl_table sd_ctl_root[] = {
++ {
++ .procname = "kernel",
++ .mode = 0555,
++ .child = sd_ctl_dir,
++ },
++ {}
++};
++
++static struct ctl_table *sd_alloc_ctl_entry(int n)
++{
++ struct ctl_table *entry =
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
++
++ return entry;
++}
++
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
++#define CPU_LOAD_IDX_MAX 5
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
++static void
++set_table_entry(struct ctl_table *entry,
++ const char *procname, void *data, int maxlen,
++ umode_t mode, proc_handler *proc_handler,
++ bool load_idx)
++{
++ entry->procname = procname;
++ entry->data = data;
++ entry->maxlen = maxlen;
++ entry->mode = mode;
++ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
++}
++
++static struct ctl_table *
++sd_alloc_ctl_domain_table(struct sched_domain *sd)
++{
++ struct ctl_table *table = sd_alloc_ctl_entry(14);
++
++ if (table == NULL)
++ return NULL;
++
++ set_table_entry(&table[0], "min_interval", &sd->min_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[1], "max_interval", &sd->max_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[9], "cache_nice_tries",
++ &sd->cache_nice_tries,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[10], "flags", &sd->flags,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[11], "max_newidle_lb_cost",
++ &sd->max_newidle_lb_cost,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[12], "name", sd->name,
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
++ /* &table[13] is terminator */
++
++ return table;
++}
++
++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++{
++ struct ctl_table *entry, *table;
++ struct sched_domain *sd;
++ int domain_num = 0, i;
++ char buf[32];
++
++ for_each_domain(cpu, sd)
++ domain_num++;
++ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
++
++ i = 0;
++ for_each_domain(cpu, sd) {
++ snprintf(buf, 32, "domain%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_domain_table(sd);
++ entry++;
++ i++;
++ }
++ return table;
++}
++
++static cpumask_var_t sd_sysctl_cpus;
++static struct ctl_table_header *sd_sysctl_header;
++
++void register_sched_domain_sysctl(void)
++{
++ static struct ctl_table *cpu_entries;
++ static struct ctl_table **cpu_idx;
++ char buf[32];
++ int i;
++
++ if (!cpu_entries) {
++ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
++ if (!cpu_entries)
++ return;
++
++ WARN_ON(sd_ctl_dir[0].child);
++ sd_ctl_dir[0].child = cpu_entries;
++ }
++
++ if (!cpu_idx) {
++ struct ctl_table *e = cpu_entries;
++
++ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
++ if (!cpu_idx)
++ return;
++
++ /* deal with sparse possible map */
++ for_each_possible_cpu(i) {
++ cpu_idx[i] = e;
++ e++;
++ }
++ }
++
++ if (!cpumask_available(sd_sysctl_cpus)) {
++ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
++ return;
++
++ /* init to possible to not have holes in @cpu_entries */
++ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
++ }
++
++ for_each_cpu(i, sd_sysctl_cpus) {
++ struct ctl_table *e = cpu_idx[i];
++
++ if (e->child)
++ sd_free_ctl_entry(&e->child);
++
++ if (!e->procname) {
++ snprintf(buf, 32, "cpu%d", i);
++ e->procname = kstrdup(buf, GFP_KERNEL);
++ }
++ e->mode = 0555;
++ e->child = sd_alloc_ctl_cpu_table(i);
++
++ __cpumask_clear_cpu(i, sd_sysctl_cpus);
++ }
++
++ WARN_ON(sd_sysctl_header);
++ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
++}
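++
++/*
++ * The resulting hierarchy appears under /proc/sys/kernel/sched_domain/
++ * (CONFIG_SCHED_DEBUG kernels only), one directory per CPU and per
++ * domain level, e.g.:
++ *
++ *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
++ */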
++
++void dirty_sched_domain_sysctl(int cpu)
++{
++ if (cpumask_available(sd_sysctl_cpus))
++ __cpumask_set_cpu(cpu, sd_sysctl_cpus);
++}
++
++/* may be called multiple times per register */
++void unregister_sched_domain_sysctl(void)
++{
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++}
++#endif /* CONFIG_SYSCTL */
++
++void set_rq_online(struct rq *rq)
++{
++ if (!rq->online) {
++ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
++ rq->online = true;
++ }
++}
++
++void set_rq_offline(struct rq *rq)
++{
++ if (rq->online) {
++ int cpu = cpu_of(rq);
++
++ cpumask_clear_cpu(cpu, rq->rd->online);
++ rq->online = false;
++ clear_cpuidle_map(cpu);
++ }
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask. If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore them to their original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++ if (cpuhp_tasks_frozen) {
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in the suspend/
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ partition_sched_domains(1, NULL, NULL);
++ if (--num_cpus_frozen)
++ return;
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++ cpuset_force_rebuild();
++ }
++
++ cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++ if (!cpuhp_tasks_frozen) {
++ cpuset_update_active_cpus();
++ } else {
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ }
++ return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ set_cpu_active(cpu, true);
++
++ if (sched_smp_initialized) {
++ sched_domains_numa_masks_set(cpu);
++ cpuset_cpu_active();
++ }
++
++ /*
++ * Put the rq online, if not already. This happens:
++ *
++ * 1) In the early boot process, because we build the real domains
++ * after all CPUs have been brought up.
++ *
++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++ * domains.
++ */
++ rq_lock_irqsave(rq, &flags);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_online(rq);
++ }
++ unbind_zero(cpu);
++ rq_unlock_irqrestore(rq, &flags);
++
++ return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++ int ret;
++
++ set_cpu_active(cpu, false);
++ /*
++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++ * users of this state to go away such that all new such users will
++ * observe it.
++ *
++ * Synchronize before parking the smpboot threads to take care of the
++ * RCU boost case.
++ */
++ synchronize_rcu_mult(call_rcu, call_rcu_sched);
++
++ if (!sched_smp_initialized)
++ return 0;
++
++ ret = cpuset_cpu_inactive(cpu);
++ if (ret) {
++ set_cpu_active(cpu, true);
++ return ret;
++ }
++ sched_domains_numa_masks_clear(cpu);
++ return 0;
++}
++
++int sched_cpu_starting(unsigned int __maybe_unused cpu)
++{
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++int sched_cpu_dying(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ local_irq_save(flags);
++ double_rq_lock(rq, cpu_rq(0));
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ bind_zero(cpu);
++ double_rq_unlock(rq, cpu_rq(0));
++ sched_start_tick(rq, cpu);
++ hrexpiry_clear(rq);
++ local_irq_restore(flags);
++
++ return 0;
++}
++#endif
++
++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
++/*
++ * Cheaper version of the below functions in case support for SMT and MC is
++ * compiled in but CPUs have no siblings.
++ */
++static bool sole_cpu_idle(struct rq *rq)
++{
++ return rq_idle(rq);
++}
++#endif
++#ifdef CONFIG_SCHED_SMT
++static const cpumask_t *thread_cpumask(int cpu)
++{
++ return topology_sibling_cpumask(cpu);
++}
++/* All this CPU's SMT siblings are idle */
++static bool siblings_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
++}
++#endif
++#ifdef CONFIG_SCHED_MC
++static const cpumask_t *core_cpumask(int cpu)
++{
++ return topology_core_cpumask(cpu);
++}
++/* All this CPU's shared cache siblings are idle */
++static bool cache_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->core_mask, &cpu_idle_map);
++}
++#endif
++
++enum sched_domain_level {
++ SD_LV_NONE = 0,
++ SD_LV_SIBLING,
++ SD_LV_MC,
++ SD_LV_BOOK,
++ SD_LV_CPU,
++ SD_LV_NODE,
++ SD_LV_ALLNODES,
++ SD_LV_MAX
++};
++
++void __init sched_init_smp(void)
++{
++ struct rq *rq, *other_rq, *leader;
++ struct sched_domain *sd;
++ int cpu, other_cpu, i;
++#ifdef CONFIG_SCHED_SMT
++ bool smt_threads = false;
++#endif
++ sched_init_numa();
++
++ /*
++ * There's no userspace yet to cause hotplug operations; hence all the
++ * cpu masks are stable and all blatant races in the below code cannot
++ * happen.
++ */
++ mutex_lock(&sched_domains_mutex);
++ sched_init_domains(cpu_active_mask);
++ mutex_unlock(&sched_domains_mutex);
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
++ BUG();
++
++ mutex_lock(&sched_domains_mutex);
++ local_irq_disable();
++ lock_all_rqs();
++ /*
++ * Set up the relative cache distance of each online cpu from each
++ * other in a simple array for quick lookup. Locality is determined
++ * by the closest sched_domain that CPUs are separated by. CPUs with
++ * shared cache in SMT and MC are treated as local. Separate CPUs
++ * (within the same package or physically) within the same node are
++ * treated as not local. CPUs not even in the same domain (different
++ * nodes) are treated as very distant.
++ */
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ /* First check if this cpu is in the same node */
++ for_each_domain(cpu, sd) {
++ if (sd->level > SD_LV_MC)
++ continue;
++ leader = NULL;
++ /* Set locality to local node if not already found lower */
++ for_each_cpu(other_cpu, sched_domain_span(sd)) {
++ if (rqshare == RQSHARE_SMP) {
++ other_rq = cpu_rq(other_cpu);
++
++ /* Set the smp_leader to the first CPU */
++ if (!leader)
++ leader = rq;
++ other_rq->smp_leader = leader;
++ }
++
++ if (rq->cpu_locality[other_cpu] > 3)
++ rq->cpu_locality[other_cpu] = 3;
++ }
++ }
++
++ /*
++ * Each runqueue has its own function in case it doesn't have
++ * siblings of its own allowing mixed topologies.
++ */
++#ifdef CONFIG_SCHED_MC
++ leader = NULL;
++ if (cpumask_weight(core_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->core_mask);
++ for_each_cpu(other_cpu, core_cpumask(cpu)) {
++ if (rqshare == RQSHARE_MC) {
++ other_rq = cpu_rq(other_cpu);
++
++ /* Set the mc_leader to the first CPU */
++ if (!leader)
++ leader = rq;
++ other_rq->mc_leader = leader;
++ }
++ if (rq->cpu_locality[other_cpu] > 2)
++ rq->cpu_locality[other_cpu] = 2;
++ }
++ rq->cache_idle = cache_cpu_idle;
++ }
++#endif
++#ifdef CONFIG_SCHED_SMT
++ leader = NULL;
++ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->thread_mask);
++ for_each_cpu(other_cpu, thread_cpumask(cpu)) {
++ if (rqshare == RQSHARE_SMT) {
++ other_rq = cpu_rq(other_cpu);
++
++ /* Set the smt_leader to the first CPU */
++ if (!leader)
++ leader = rq;
++ other_rq->smt_leader = leader;
++ }
++ if (rq->cpu_locality[other_cpu] > 1)
++ rq->cpu_locality[other_cpu] = 1;
++ }
++ rq->siblings_idle = siblings_cpu_idle;
++ smt_threads = true;
++ }
++#endif
++ }
++
++#ifdef CONFIG_SMT_NICE
++ if (smt_threads) {
++ check_siblings = &check_smt_siblings;
++ wake_siblings = &wake_smt_siblings;
++ smt_schedule = &smt_should_schedule;
++ }
++#endif
++ unlock_all_rqs();
++ local_irq_enable();
++ mutex_unlock(&sched_domains_mutex);
++
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ for_each_online_cpu(other_cpu) {
++ if (other_cpu <= cpu)
++ continue;
++ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
++ }
++ }
++
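++	/*
++	 * Where an rq has a different smp_leader, discard its private
++	 * skiplist, node and lock and point them at the leader's, so all
++	 * CPUs in the group run off one shared runqueue.
++	 */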
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++ leader = rq->smp_leader;
++
++ rq_lock(rq);
++ if (leader && rq != leader) {
++ printk(KERN_INFO "Sharing SMP runqueue from CPU %d to CPU %d\n",
++ leader->cpu, rq->cpu);
++ kfree(rq->node);
++ kfree(rq->sl);
++ kfree(rq->lock);
++ rq->node = leader->node;
++ rq->sl = leader->sl;
++ rq->lock = leader->lock;
++ barrier();
++ /* To make up for not unlocking the freed runlock */
++ preempt_enable();
++ } else
++ rq_unlock(rq);
++ }
++
++#ifdef CONFIG_SCHED_MC
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++ leader = rq->mc_leader;
++
++ rq_lock(rq);
++ if (leader && rq != leader) {
++ printk(KERN_INFO "Sharing MC runqueue from CPU %d to CPU %d\n",
++ leader->cpu, rq->cpu);
++ kfree(rq->node);
++ kfree(rq->sl);
++ kfree(rq->lock);
++ rq->node = leader->node;
++ rq->sl = leader->sl;
++ rq->lock = leader->lock;
++ barrier();
++ /* To make up for not unlocking the freed runlock */
++ preempt_enable();
++ } else
++ rq_unlock(rq);
++ }
++#endif /* CONFIG_SCHED_MC */
++
++#ifdef CONFIG_SCHED_SMT
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ leader = rq->smt_leader;
++
++ rq_lock(rq);
++ if (leader && rq != leader) {
++ printk(KERN_INFO "Sharing SMT runqueue from CPU %d to CPU %d\n",
++ leader->cpu, rq->cpu);
++ kfree(rq->node);
++ kfree(rq->sl);
++ kfree(rq->lock);
++ rq->node = leader->node;
++ rq->sl = leader->sl;
++ rq->lock = leader->lock;
++ barrier();
++ /* To make up for not unlocking the freed runlock */
++ preempt_enable();
++ } else
++ rq_unlock(rq);
++ }
++#endif /* CONFIG_SCHED_SMT */
++
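++	/*
++	 * Count only leader runqueues (rqs that lead themselves at every
++	 * enabled level) and build the per-CPU rq_order/cpu_order arrays
++	 * in ascending order of cache distance.
++	 */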
++ total_runqueues = 0;
++ for_each_possible_cpu(cpu) {
++ int locality, total_rqs = 0, total_cpus = 0;
++
++ rq = cpu_rq(cpu);
++ if (
++#ifdef CONFIG_SCHED_MC
++ (rq->mc_leader == rq) &&
++#endif
++#ifdef CONFIG_SCHED_SMT
++ (rq->smt_leader == rq) &&
++#endif
++ (rq->smp_leader == rq))
++ total_runqueues++;
++
++ for (locality = 0; locality <= 4; locality++) {
++ int test_cpu;
++
++ for_each_possible_cpu(test_cpu) {
++ /* Work from each CPU up instead of every rq
++ * starting at CPU 0 */
++ other_cpu = test_cpu + cpu;
++ other_cpu %= num_possible_cpus();
++ other_rq = cpu_rq(other_cpu);
++
++ if (rq->cpu_locality[other_cpu] == locality) {
++ rq->cpu_order[total_cpus++] = other_rq;
++					if (
++#ifdef CONFIG_SCHED_MC
++ (other_rq->mc_leader == other_rq) &&
++#endif
++#ifdef CONFIG_SCHED_SMT
++ (other_rq->smt_leader == other_rq) &&
++#endif
++ (other_rq->smp_leader == other_rq))
++ rq->rq_order[total_rqs++] = other_rq;
++ }
++ }
++ }
++ }
++
++ for_each_possible_cpu(cpu) {
++ rq = cpu_rq(cpu);
++ for (i = 0; i < total_runqueues; i++) {
++ printk(KERN_DEBUG "CPU %d RQ order %d RQ %d\n", cpu, i,
++ rq->rq_order[i]->cpu);
++ }
++ }
++ for_each_possible_cpu(cpu) {
++ rq = cpu_rq(cpu);
++ for (i = 0; i < num_possible_cpus(); i++) {
++ printk(KERN_DEBUG "CPU %d CPU order %d RQ %d\n", cpu, i,
++ rq->cpu_order[i]->cpu);
++ }
++ }
++ switch (rqshare) {
++ case RQSHARE_SMP:
++ printk(KERN_INFO "MuQSS runqueue share type SMP total runqueues: %d\n",
++ total_runqueues);
++ break;
++ case RQSHARE_MC:
++ printk(KERN_INFO "MuQSS runqueue share type MC total runqueues: %d\n",
++ total_runqueues);
++ break;
++ case RQSHARE_SMT:
++ printk(KERN_INFO "MuQSS runqueue share type SMT total runqueues: %d\n",
++ total_runqueues);
++ break;
++ case RQSHARE_NONE:
++ printk(KERN_INFO "MuQSS runqueue share type none total runqueues: %d\n",
++ total_runqueues);
++ break;
++ }
++
++ sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++ sched_smp_initialized = true;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++ struct cgroup_subsys_state css;
++
++ struct rcu_head rcu;
++ struct list_head list;
++
++ struct task_group *parent;
++ struct list_head siblings;
++ struct list_head children;
++};
++
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++#ifdef CONFIG_SMP
++ int cpu_ids;
++#endif
++ int i;
++ struct rq *rq;
++
++ sched_clock_init();
++
++ wait_bit_init();
++
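++	/*
++	 * Build the geometric prio_ratios table: starting from 128, each
++	 * nice level's ratio is 10% larger than the previous one, so
++	 * adjacent nice levels differ by roughly 10% in CPU entitlement.
++	 */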
++ prio_ratios[0] = 128;
++ for (i = 1 ; i < NICE_WIDTH ; i++)
++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
++
++ skiplist_node_init(&init_task.node);
++
++#ifdef CONFIG_SMP
++ init_defrootdomain();
++ cpumask_clear(&cpu_idle_map);
++#else
++ uprq = &per_cpu(runqueues, 0);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++ task_group_cache = KMEM_CACHE(task_group, 0);
++
++ list_add(&root_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&root_task_group.children);
++ INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
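++		/*
++		 * The skiplist, its header node and the rq lock are heap
++		 * allocated so that sched_init_smp() can later free them on
++		 * runqueues that share a leader's copies.
++		 */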
++ rq->node = kmalloc(sizeof(skiplist_node), GFP_ATOMIC);
++ skiplist_init(rq->node);
++ rq->sl = new_skiplist(rq->node);
++ rq->lock = kmalloc(sizeof(raw_spinlock_t), GFP_ATOMIC);
++ raw_spin_lock_init(rq->lock);
++ rq->nr_running = 0;
++ rq->nr_uninterruptible = 0;
++ rq->nr_switches = 0;
++ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
++ rq->last_jiffy = jiffies;
++ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
++ rq->iowait_ns = rq->idle_ns = 0;
++ rq->dither = 0;
++ set_rq_task(rq, &init_task);
++ rq->iso_ticks = 0;
++ rq->iso_refractory = false;
++#ifdef CONFIG_SMP
++ rq->smp_leader = rq;
++#ifdef CONFIG_SCHED_MC
++ rq->mc_leader = rq;
++#endif
++#ifdef CONFIG_SCHED_SMT
++ rq->smt_leader = rq;
++#endif
++ rq->sd = NULL;
++ rq->rd = NULL;
++ rq->online = false;
++ rq->cpu = i;
++ rq_attach_root(rq, &def_root_domain);
++#endif
++ init_rq_hrexpiry(rq);
++ atomic_set(&rq->nr_iowait, 0);
++ }
++
++#ifdef CONFIG_SMP
++ cpu_ids = i;
++ /*
++ * Set the base locality for cpu cache distance calculation to
++ * "distant" (3). Make sure the distance from a CPU to itself is 0.
++ */
++ for_each_possible_cpu(i) {
++ int j;
++
++ rq = cpu_rq(i);
++#ifdef CONFIG_SCHED_SMT
++ rq->siblings_idle = sole_cpu_idle;
++#endif
++#ifdef CONFIG_SCHED_MC
++ rq->cache_idle = sole_cpu_idle;
++#endif
++		rq->cpu_locality = kmalloc(cpu_ids * sizeof(int), GFP_ATOMIC);
++ for_each_possible_cpu(j) {
++ if (i == j)
++ rq->cpu_locality[j] = 0;
++ else
++ rq->cpu_locality[j] = 4;
++ }
++ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
++ rq->cpu_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
++ rq->rq_order[0] = rq->cpu_order[0] = rq;
++ for (j = 1; j < cpu_ids; j++)
++ rq->rq_order[j] = rq->cpu_order[j] = cpu_rq(j);
++ }
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ mmgrab(&init_mm);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread, however somewhere below it might be,
++ * but because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++
++#ifdef CONFIG_SMP
++ idle_thread_set_boot_cpu();
++#endif /* SMP */
++
++ init_schedstats();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++ int nested = preempt_count() + rcu_preempt_depth();
++
++ return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++ /*
++ * Blocking primitives will set (and therefore destroy) current->state,
++ * since we will exit with TASK_RUNNING make sure we enter with it,
++ * otherwise we will destroy state.
++ */
++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++ "do not call blocking ops when !TASK_RUNNING; "
++ "state=%lx set at [<%p>] %pS\n",
++ current->state,
++ (void *)current->task_state_change,
++ (void *)current->task_state_change);
++
++ ___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++ /* Ratelimiting timestamp: */
++ static unsigned long prev_jiffy;
++
++ unsigned long preempt_disable_ip;
++
++ /* WARN_ON_ONCE() by default, no rate limit required: */
++ rcu_sleep_check();
++
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++ oops_in_progress)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ /* Save this before calling printk(), since that will clobber it: */
++ preempt_disable_ip = get_preempt_disable_ip(current);
++
++ printk(KERN_ERR
++ "BUG: sleeping function called from invalid context at %s:%d\n",
++ file, line);
++ printk(KERN_ERR
++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ if (task_stack_end_corrupted(current))
++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && !preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static inline void normalise_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (p->flags & PF_KTHREAD)
++ continue;
++
++ if (!rt_task(p) && !iso_task(p))
++ continue;
++
++ rq = task_rq_lock(p, &flags);
++ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
++ task_rq_unlock(rq, p, &flags);
++ }
++ read_unlock(&tasklist_lock);
++}
++
++void normalize_rt_tasks(void)
++{
++ normalise_rt_tasks();
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++void init_idle_bootup_task(struct task_struct *idle)
++{}
++
++#ifdef CONFIG_SCHED_DEBUG
++__read_mostly bool sched_debug_enabled;
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++ struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_SMP
++#define SCHED_LOAD_SHIFT (10)
++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
++
++unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
++{
++ return SCHED_LOAD_SCALE;
++}
++
++unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
++{
++ unsigned long weight = cpumask_weight(sched_domain_span(sd));
++ unsigned long smt_gain = sd->smt_gain;
++
++ smt_gain /= weight;
++
++ return smt_gain;
++}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++ kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++ /* Now it should be safe to free those cfs_rqs */
++ sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++ /* Wait for possible concurrent references to cfs_rqs complete */
++ call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++ return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_legacy_files[] = {
++ { } /* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++ { } /* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++ struct cgroup_subsys_state *css)
++{
++ return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_online = cpu_cgroup_css_online,
++ .css_released = cpu_cgroup_css_released,
++ .css_free = cpu_cgroup_css_free,
++ .css_extra_stat_show = cpu_extra_stat_show,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++ .dfl_cftypes = cpu_files,
++ .early_init = true,
++ .threaded = true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
+diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
+new file mode 100644
+index 000000000000..4784a9486aa2
+--- /dev/null
++++ b/kernel/sched/MuQSS.h
+@@ -0,0 +1,768 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef MUQSS_SCHED_H
++#define MUQSS_SCHED_H
++
++#include <linux/sched.h>
++#include <linux/cpuidle.h>
++#include <linux/freezer.h>
++#include <linux/interrupt.h>
++#include <linux/skip_list.h>
++#include <linux/stop_machine.h>
++#include <linux/sched/topology.h>
++#include <linux/u64_stats_sync.h>
++#include <linux/tsacct_kern.h>
++#include <linux/sched/clock.h>
++#include <linux/sched/wake_q.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/task.h>
++#include <linux/sched/task_stack.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/init.h>
++
++#include <linux/u64_stats_sync.h>
++#include <linux/kernel_stat.h>
++#include <linux/tick.h>
++#include <linux/slab.h>
++#include <linux/cgroup.h>
++
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
++#else
++# define SCHED_WARN_ON(x) ((void)(x))
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED 1
++#define TASK_ON_RQ_MIGRATING 2
++
++struct rq;
++
++#ifdef CONFIG_SMP
++
++static inline bool sched_asym_prefer(int a, int b)
++{
++ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
++}
++
++/*
++ * We add the notion of a root-domain which will be used to define per-domain
++ * variables. Each exclusive cpuset essentially defines an island domain by
++ * fully partitioning the member cpus from any other cpuset. Whenever a new
++ * exclusive cpuset is created, we also create and attach a new root-domain
++ * object.
++ *
++ */
++struct root_domain {
++ atomic_t refcount;
++ atomic_t rto_count;
++ struct rcu_head rcu;
++ cpumask_var_t span;
++ cpumask_var_t online;
++
++ /* Indicate more than one runnable task for any CPU */
++ bool overload;
++
++ /*
++ * The bit corresponding to a CPU gets set here if such CPU has more
++ * than one runnable -deadline task (as it is below for RT tasks).
++ */
++ cpumask_var_t dlo_mask;
++ atomic_t dlo_count;
++ /* Replace unused CFS structures with void */
++ //struct dl_bw dl_bw;
++ //struct cpudl cpudl;
++ void *dl_bw;
++ void *cpudl;
++
++ /*
++ * The "RT overload" flag: it gets set if a CPU has more than
++ * one runnable RT task.
++ */
++ cpumask_var_t rto_mask;
++ //struct cpupri cpupri;
++ void *cpupri;
++
++ unsigned long max_cpu_capacity;
++};
++
++extern struct root_domain def_root_domain;
++extern struct mutex sched_domains_mutex;
++
++extern void init_defrootdomain(void);
++extern int sched_init_domains(const struct cpumask *cpu_map);
++extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
++
++static inline void cpupri_cleanup(void __maybe_unused *cpupri)
++{
++}
++
++static inline void cpudl_cleanup(void __maybe_unused *cpudl)
++{
++}
++
++static inline void init_dl_bw(void __maybe_unused *dl_bw)
++{
++}
++
++static inline int cpudl_init(void __maybe_unused *dl_bw)
++{
++ return 0;
++}
++
++static inline int cpupri_init(void __maybe_unused *cpupri)
++{
++ return 0;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
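++	/*
++	 * lock is a pointer rather than an embedded spinlock so that
++	 * several runqueues can share one leader's lock when runqueue
++	 * sharing is enabled.
++	 */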
++ raw_spinlock_t *lock;
++ raw_spinlock_t *orig_lock;
++
++ struct task_struct *curr, *idle, *stop;
++ struct mm_struct *prev_mm;
++
++ unsigned int nr_running;
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
++ u64 nr_switches;
++
++ /* Stored data about rq->curr to work outside rq lock */
++ u64 rq_deadline;
++ int rq_prio;
++
++ /* Best queued id for use outside lock */
++ u64 best_key;
++
++ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
++ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
++ u64 niffies; /* Last time this RQ updated rq clock */
++ u64 last_niffy; /* Last niffies as updated by local clock */
++ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
++
++ u64 load_update; /* When we last updated load */
++ unsigned long load_avg; /* Rolling load average */
++#ifdef CONFIG_SMT_NICE
++ struct mm_struct *rq_mm;
++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++ /* Accurate timekeeping data */
++ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
++ iowait_ns, idle_ns;
++ atomic_t nr_iowait;
++
++ skiplist_node *node;
++ skiplist *sl;
++#ifdef CONFIG_SMP
++ struct task_struct *preempt; /* Preempt triggered on this task */
++ struct task_struct *preempting; /* Hint only, what task is preempting */
++
++ int cpu; /* cpu of this runqueue */
++ bool online;
++
++ struct root_domain *rd;
++ struct sched_domain *sd;
++
++ unsigned long cpu_capacity_orig;
++
++ int *cpu_locality; /* CPU relative cache distance */
++ struct rq **rq_order; /* Shared RQs ordered by relative cache distance */
++ struct rq **cpu_order; /* RQs of discrete CPUs ordered by distance */
++
++ struct rq *smp_leader; /* First physical CPU per node */
++#ifdef CONFIG_SCHED_SMT
++ struct rq *smt_leader; /* First logical CPU in SMT siblings */
++ cpumask_t thread_mask;
++ bool (*siblings_idle)(struct rq *rq);
++ /* See if all smt siblings are idle */
++#endif /* CONFIG_SCHED_SMT */
++#ifdef CONFIG_SCHED_MC
++ struct rq *mc_leader; /* First logical CPU in MC siblings */
++ cpumask_t core_mask;
++ bool (*cache_idle)(struct rq *rq);
++ /* See if all cache siblings are idle */
++#endif /* CONFIG_SCHED_MC */
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++ u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++ u64 clock, old_clock, last_tick;
++ u64 clock_task;
++ int dither;
++
++ int iso_ticks;
++ bool iso_refractory;
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++ struct hrtimer hrexpiry_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++ /* latency stats */
++ struct sched_info rq_sched_info;
++ unsigned long long rq_cpu_time;
++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++ /* sys_sched_yield() stats */
++ unsigned int yld_count;
++
++ /* schedule() stats */
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_SMP
++ struct llist_head wake_list;
++#endif
++
++#ifdef CONFIG_CPU_IDLE
++ /* Must be inspected within a rcu lock section */
++ struct cpuidle_state *idle_state;
++#endif
++};
++
++#ifdef CONFIG_SMP
++struct rq *cpu_rq(int cpu);
++#endif
++
++#ifndef CONFIG_SMP
++extern struct rq *uprq;
++#define cpu_rq(cpu) (uprq)
++#define this_rq() (uprq)
++#define raw_rq() (uprq)
++#define task_rq(p) (uprq)
++#define cpu_curr(cpu) ((uprq)->curr)
++#else /* CONFIG_SMP */
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define this_rq() this_cpu_ptr(&runqueues)
++#define raw_rq() raw_cpu_ptr(&runqueues)
++#define task_rq(p) cpu_rq(task_cpu(p))
++#endif /* CONFIG_SMP */
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++ return p->on_cpu;
++#else
++ return task_current(rq, p);
++#endif
++}
++
++static inline void rq_lock(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock(rq->lock);
++}
++
++static inline void rq_unlock(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock(rq->lock);
++}
++
++static inline void rq_lock_irq(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irq(rq->lock);
++}
++
++static inline void rq_unlock_irq(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irq(rq->lock);
++}
++
++static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irqsave(rq->lock, *flags);
++}
++
++static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irqrestore(rq->lock, *flags);
++}
++
++static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
++ __acquires(p->pi_lock)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
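++	/*
++	 * The task can migrate between reading task_rq() and locking it,
++	 * so retry until the rq is stable under its lock.
++	 */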
++ while (42) {
++ raw_spin_lock_irqsave(&p->pi_lock, *flags);
++ rq = task_rq(p);
++ raw_spin_lock(rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++ }
++ return rq;
++}
++
++static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
++ __releases(rq->lock)
++ __releases(p->pi_lock)
++{
++ rq_unlock(rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++}
++
++static inline struct rq *__task_rq_lock(struct task_struct *p)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ lockdep_assert_held(&p->pi_lock);
++
++ while (42) {
++ rq = task_rq(p);
++ raw_spin_lock(rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(rq->lock);
++ }
++ return rq;
++}
++
++static inline void __task_rq_unlock(struct rq *rq)
++{
++ rq_unlock(rq);
++}
++
++/*
++ * {de,en}queue flags: Most not used on MuQSS.
++ *
++ * DEQUEUE_SLEEP - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
++ * are in a known state which allows modification. Such pairs
++ * should preserve as much state as possible.
++ *
++ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
++ * in the runqueue.
++ *
++ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
++ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
++ * ENQUEUE_MIGRATED - the task was migrated during wakeup
++ *
++ */
++
++#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
++
++#define ENQUEUE_RESTORE 0x02
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++ return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++ lockdep_assert_held(rq->lock);
++
++ return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++ lockdep_assert_held(rq->lock);
++
++ return rq->clock_task;
++}
++
++#ifdef CONFIG_NUMA
++enum numa_topology_type {
++ NUMA_DIRECT,
++ NUMA_GLUELESS_MESH,
++ NUMA_BACKPLANE,
++};
++extern enum numa_topology_type sched_numa_topology_type;
++extern int sched_max_numa_distance;
++extern bool find_numa_distance(int distance);
++
++extern void sched_init_numa(void);
++extern void sched_domains_numa_masks_set(unsigned int cpu);
++extern void sched_domains_numa_masks_clear(unsigned int cpu);
++#else
++static inline void sched_init_numa(void) { }
++static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
++static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
++#endif
++
++extern struct mutex sched_domains_mutex;
++extern struct static_key_false sched_schedstats;
++
++#define rcu_dereference_check_sched_domain(p) \
++ rcu_dereference_check((p), \
++ lockdep_is_held(&sched_domains_mutex))
++
++#ifdef CONFIG_SMP
++
++/*
++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
++ * See detach_destroy_domains: synchronize_sched for details.
++ *
++ * The domain tree of any CPU may only be accessed from within
++ * preempt-disabled sections.
++ */
++#define for_each_domain(cpu, __sd) \
++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
++ __sd; __sd = __sd->parent)
++
++#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
++
++/**
++ * highest_flag_domain - Return highest sched_domain containing flag.
++ * @cpu: The cpu whose highest level of sched domain is to
++ * be returned.
++ * @flag: The flag to check for the highest sched_domain
++ * for the given cpu.
++ *
++ * Returns the highest sched_domain of a cpu which contains the given flag.
++ */
++static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd, *hsd = NULL;
++
++ for_each_domain(cpu, sd) {
++ if (!(sd->flags & flag))
++ break;
++ hsd = sd;
++ }
++
++ return hsd;
++}
++
++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd;
++
++ for_each_domain(cpu, sd) {
++ if (sd->flags & flag)
++ break;
++ }
++
++ return sd;
++}
++
++DECLARE_PER_CPU(struct sched_domain *, sd_llc);
++DECLARE_PER_CPU(int, sd_llc_size);
++DECLARE_PER_CPU(int, sd_llc_id);
++DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
++DECLARE_PER_CPU(struct sched_domain *, sd_numa);
++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
++
++struct sched_group_capacity {
++ atomic_t ref;
++ /*
++ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
++ * for a single CPU.
++ */
++ unsigned long capacity;
++ unsigned long min_capacity; /* Min per-CPU capacity in group */
++ unsigned long next_update;
++ int imbalance; /* XXX unrelated to capacity but shared group state */
++
++#ifdef CONFIG_SCHED_DEBUG
++ int id;
++#endif
++
++ unsigned long cpumask[0]; /* balance mask */
++};
++
++struct sched_group {
++ struct sched_group *next; /* Must be a circular list */
++ atomic_t ref;
++
++ unsigned int group_weight;
++ struct sched_group_capacity *sgc;
++ int asym_prefer_cpu; /* cpu of highest priority in group */
++
++ /*
++ * The CPUs this group covers.
++ *
++ * NOTE: this field is variable length. (Allocated dynamically
++ * by attaching extra space to the end of the structure,
++ * depending on how many CPUs the kernel has booted up with)
++ */
++ unsigned long cpumask[0];
++};
++
++static inline struct cpumask *sched_group_span(struct sched_group *sg)
++{
++ return to_cpumask(sg->cpumask);
++}
++
++/*
++ * See build_balance_mask().
++ */
++static inline struct cpumask *group_balance_mask(struct sched_group *sg)
++{
++ return to_cpumask(sg->sgc->cpumask);
++}
++
++/**
++ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
++ * @group: The group whose first cpu is to be returned.
++ */
++static inline unsigned int group_first_cpu(struct sched_group *group)
++{
++ return cpumask_first(sched_group_span(group));
++}
++
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void dirty_sched_domain_sysctl(int cpu);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void dirty_sched_domain_sysctl(int cpu)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern void sched_ttwu_pending(void);
++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
++extern void set_rq_online(struct rq *rq);
++extern void set_rq_offline(struct rq *rq);
++extern bool sched_smp_initialized;
++
++static inline void update_group_capacity(struct sched_domain *sd, int cpu)
++{
++}
++
++static inline void trigger_load_balance(struct rq *rq)
++{
++}
++
++#define sched_feat(x) 0
++
++#else /* CONFIG_SMP */
++
++static inline void sched_ttwu_pending(void) { }
++
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++ rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ SCHED_WARN_ON(!rcu_read_lock_held());
++ return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ return NULL;
++}
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++extern bool sched_debug_enabled;
++#endif
++
++extern void schedule_idle(void);
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++ u64 total;
++ u64 tick_delta;
++ u64 irq_start_time;
++ struct u64_stats_sync sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted from it and never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++ unsigned int seq;
++ u64 total;
++
++ do {
++ seq = __u64_stats_fetch_begin(&irqtime->sync);
++ total = irqtime->total;
++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++ return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_SMP
++static inline int cpu_of(struct rq *rq)
++{
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static inline int cpu_of(struct rq *rq)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
++
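++/*
++ * Trigger a cpufreq update through the registered hook, passing the
++ * rq's niffies count as the current time.
++ */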
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++ cpu_of(rq)));
++
++ if (data)
++ data->func(data, rq->niffies, flags);
++}
++#else
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
++{
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant() (true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant() (false)
++#endif
++
++/*
++ * This should only be called when current == rq->idle. Dodgy workaround for
++ * when softirqs are pending and we are in the idle loop. Setting current to
++ * resched will kick us out of the idle loop and the softirqs will be serviced
++ * on our next pass through schedule().
++ */
++static inline bool softirq_pending(int cpu)
++{
++ if (likely(!local_softirq_pending()))
++ return false;
++ set_tsk_need_resched(current);
++ return true;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return tsk_seruntime(t);
++}
++#else
++struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags);
++void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags);
++
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ unsigned long flags;
++ u64 ns;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &flags);
++ ns = tsk_seruntime(t);
++ task_rq_unlock(rq, t, &flags);
++
++ return ns;
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++ return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++#ifndef arch_scale_cpu_capacity
++static __always_inline
++unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
++{
++ if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
++ return sd->smt_gain / sd->span_weight;
++
++ return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++#define SCHED_FLAG_SUGOV 0x10000000
++
++#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
++
++static inline unsigned long cpu_util_dl(struct rq *rq)
++{
++ return 0;
++}
++
++static inline unsigned long cpu_util_cfs(struct rq *rq)
++{
++ unsigned long ret = rq->load_avg;
++
++ if (ret > SCHED_CAPACITY_SCALE)
++ ret = SCHED_CAPACITY_SCALE;
++ return ret;
++}
++
++#endif
++
++#endif /* MUQSS_SCHED_H */
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 7936f548e071..02ca5f74565b 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -480,7 +480,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ struct task_struct *thread;
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
++#ifdef CONFIG_SCHED_MUQSS
++ .sched_policy = SCHED_ISO,
++#else
+ .sched_policy = SCHED_DEADLINE,
++#endif
+ .sched_flags = SCHED_FLAG_SUGOV,
+ .sched_nice = 0,
+ .sched_priority = 0,
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index bac6ac9a4ec7..e3153ba66834 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -269,26 +269,6 @@ static inline u64 account_other_time(u64 max)
+ return accounted;
+ }
+
+-#ifdef CONFIG_64BIT
+-static inline u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- return t->se.sum_exec_runtime;
+-}
+-#else
+-static u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- u64 ns;
+- struct rq_flags rf;
+- struct rq *rq;
+-
+- rq = task_rq_lock(t, &rf);
+- ns = t->se.sum_exec_runtime;
+- task_rq_unlock(rq, t, &rf);
+-
+- return ns;
+-}
+-#endif
+-
+ /*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+@@ -666,7 +646,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ struct task_cputime cputime = {
+- .sum_exec_runtime = p->se.sum_exec_runtime,
++ .sum_exec_runtime = tsk_seruntime(p),
+ };
+
+ task_cputime(p, &cputime.utime, &cputime.stime);
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 7dae9eb8c042..0a374d84c2ef 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -210,6 +210,8 @@ static void cpuidle_idle_call(void)
+ static void do_idle(void)
+ {
+ int cpu = smp_processor_id();
++ bool pending = false;
++
+ /*
+ * If the arch has a polling bit, we maintain an invariant:
+ *
+@@ -220,7 +222,10 @@ static void do_idle(void)
+ */
+
+ __current_set_polling();
+- tick_nohz_idle_enter();
++ if (unlikely(softirq_pending(cpu)))
++ pending = true;
++ else
++ tick_nohz_idle_enter();
+
+ while (!need_resched()) {
+ check_pgt_cache();
+@@ -255,7 +260,8 @@ static void do_idle(void)
+ * an IPI to fold the state for us.
+ */
+ preempt_set_need_resched();
+- tick_nohz_idle_exit();
++ if (!pending)
++ tick_nohz_idle_exit();
+ __current_clr_polling();
+
+ /*
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index fb5fc458547f..ba3d008c1538 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_MUQSS
++#include "MuQSS.h"
++#else /* CONFIG_SCHED_MUQSS */
+ #include <linux/sched.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/sysctl.h>
+@@ -2133,3 +2136,30 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
+ }
+
+ #endif
++
++/* MuQSS compatibility functions */
++static inline bool softirq_pending(int cpu)
++{
++ return false;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return t->se.sum_exec_runtime;
++}
++#else
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ u64 ns;
++ struct rq_flags rf;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &rf);
++ ns = t->se.sum_exec_runtime;
++ task_rq_unlock(rq, t, &rf);
++
++ return ns;
++}
++#endif
++#endif /* CONFIG_SCHED_MUQSS */
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 519b024f4e94..de9852460fd6 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -227,7 +227,11 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
+ struct root_domain *old_rd = NULL;
+ unsigned long flags;
+
++#ifdef CONFIG_SCHED_MUQSS
++ raw_spin_lock_irqsave(rq->lock, flags);
++#else
+ raw_spin_lock_irqsave(&rq->lock, flags);
++#endif
+
+ if (rq->rd) {
+ old_rd = rq->rd;
+@@ -253,7 +257,11 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
+ if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
+ set_rq_online(rq);
+
++#ifdef CONFIG_SCHED_MUQSS
++ raw_spin_unlock_irqrestore(rq->lock, flags);
++#else
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
++#endif
+
+ if (old_rd)
+ call_rcu_sched(&old_rd->rcu, free_rootdomain);
+diff --git a/kernel/skip_list.c b/kernel/skip_list.c
+new file mode 100644
+index 000000000000..bf5c6e97e139
+--- /dev/null
++++ b/kernel/skip_list.c
+@@ -0,0 +1,148 @@
++/*
++ Copyright (C) 2011,2016 Con Kolivas.
++
++ Code based on example originally by William Pugh.
++
++Skip Lists are a probabilistic alternative to balanced trees, as
++described in the June 1990 issue of CACM and were invented by
++William Pugh in 1987.
++
++A couple of comments about this implementation:
++The routine randomLevel has been hard-coded to generate random
++levels using p=0.25. It can be easily changed.
++
++The insertion routine has been implemented so as to use the
++dirty hack described in the CACM paper: if a random level is
++generated that is more than the current maximum level, the
++current maximum level plus one is used instead.
++
++Levels start at zero and go up to MaxLevel (which is equal to
++MaxNumberOfLevels-1).
++
++The routines defined in this file are:
++
++init: defines slnode
++
++new_skiplist: returns a new, empty list
++
++randomLevel: Returns a random level based on a u64 random seed passed to it.
++In MuQSS, the "niffy" time is used for this purpose.
++
++insert(l,key, value): inserts the binding (key, value) into l. This operation
++occurs in O(log n) time.
++
++delnode(slnode, l, node): deletes any binding of key from the list l based on the
++actual node value. This operation occurs in O(k) time where k is the
++number of levels of the node in question (max 8). The original delete
++function occurred in O(log n) time and involved a search.
++
++MuQSS Notes: In this implementation of skiplists, there are bidirectional
++next/prev pointers and the insert function returns a pointer to the actual
++node where the value is stored. The key here is chosen by the scheduler so as to
++sort tasks according to the priority list requirements and is no longer used
++by the scheduler after insertion. The scheduler lookup, however, occurs in
++O(1) time because it is always the first item in the level 0 linked list.
++Since the task struct stores a copy of the node pointer upon skiplist_insert,
++it can also remove it much faster than the original implementation with the
++aid of prev<->next pointer manipulation and no searching.
++
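++A sketch of how MuQSS drives these routines (simplified; task_struct p
++embeds a skiplist_node called node):
++
++	skiplist_node *slnode = kmalloc(sizeof(skiplist_node), GFP_ATOMIC);
++	skiplist *sl;
++
++	skiplist_init(slnode);
++	sl = new_skiplist(slnode);
++	skiplist_insert(sl, &p->node, key, p, niffies);
++	best = sl->header->next[0]->value;	first item at level 0, O(1)
++	skiplist_delete(sl, &p->node);
++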
++*/
++
++#include <linux/slab.h>
++#include <linux/skip_list.h>
++
++#define MaxNumberOfLevels 8
++#define MaxLevel (MaxNumberOfLevels - 1)
++
++void skiplist_init(skiplist_node *slnode)
++{
++ int i;
++
++ slnode->key = 0xFFFFFFFFFFFFFFFF;
++ slnode->level = 0;
++ slnode->value = NULL;
++ for (i = 0; i < MaxNumberOfLevels; i++)
++ slnode->next[i] = slnode->prev[i] = slnode;
++}
++
++skiplist *new_skiplist(skiplist_node *slnode)
++{
++ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
++
++ BUG_ON(!l);
++ l->header = slnode;
++ return l;
++}
++
++void free_skiplist(skiplist *l)
++{
++ skiplist_node *p, *q;
++
++ p = l->header;
++ do {
++ q = p->next[0];
++ p->next[0]->prev[0] = q->prev[0];
++ skiplist_node_init(p);
++ p = q;
++ } while (p != l->header);
++ kfree(l);
++}
++
++void skiplist_node_init(skiplist_node *node)
++{
++ memset(node, 0, sizeof(skiplist_node));
++}
++
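++/*
++ * Map a random seed to a node level with p=0.25: level n requires the
++ * low 2n bits of the seed to be clear, so each successive level is a
++ * quarter as likely as the one below it.
++ */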
++static inline unsigned int randomLevel(const long unsigned int randseed)
++{
++ return find_first_bit(&randseed, MaxLevel) / 2;
++}
++
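++/*
++ * Standard Pugh insertion: record the rightmost node preceding the new
++ * key at each level in update[], choose a random level, then splice the
++ * node into the next/prev chains at every level up to it.
++ */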
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
++{
++ skiplist_node *update[MaxNumberOfLevels];
++ skiplist_node *p, *q;
++ int k = l->level;
++
++ p = l->header;
++ do {
++ while (q = p->next[k], q->key <= key)
++ p = q;
++ update[k] = p;
++ } while (--k >= 0);
++
++ ++l->entries;
++ k = randomLevel(randseed);
++ if (k > l->level) {
++ k = ++l->level;
++ update[k] = l->header;
++ }
++
++ node->level = k;
++ node->key = key;
++ node->value = value;
++ do {
++ p = update[k];
++ node->next[k] = p->next[k];
++ p->next[k] = node;
++ node->prev[k] = p;
++ node->next[k]->prev[k] = node;
++ } while (--k >= 0);
++}
++
++void skiplist_delete(skiplist *l, skiplist_node *node)
++{
++ int k, m = node->level;
++
++ for (k = 0; k <= m; k++) {
++ node->prev[k]->next[k] = node->next[k];
++ node->next[k]->prev[k] = node->prev[k];
++ }
++ skiplist_node_init(node);
++ if (m == l->level) {
++ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
++ m--;
++ l->level = m;
++ }
++ l->entries--;
++}
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index f98f28c12020..18ba455647c9 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -126,9 +126,17 @@ static int __maybe_unused one __read_only = 1;
+ static int __maybe_unused two __read_only = 2;
+ static int __maybe_unused four __read_only = 4;
+ static unsigned long one_ul __read_only = 1;
+-static int one_hundred __read_only = 100;
+-static int one_thousand __read_only = 1000;
+-#ifdef CONFIG_PRINTK
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
++#ifdef CONFIG_SCHED_MUQSS
++extern int rr_interval;
++extern int sched_interactive;
++extern int sched_iso_cpu;
++extern int sched_yield_type;
++#endif
++extern int hrtimer_granularity_us;
++extern int hrtimeout_min_us;
++#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
+ static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+@@ -288,7 +296,7 @@ static struct ctl_table sysctl_base_table[] = {
+ { }
+ };
+
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
+ static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
+ static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
+@@ -305,6 +313,7 @@ static int max_extfrag_threshold = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_MUQSS
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -467,6 +476,7 @@ static struct ctl_table kern_table[] = {
+ .extra1 = &one,
+ },
+ #endif
++#endif /* !CONFIG_SCHED_MUQSS */
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ .procname = "prove_locking",
+@@ -1025,6 +1035,62 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ {
++ .procname = "rr_interval",
++ .data = &rr_interval,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one_thousand,
++ },
++ {
++ .procname = "interactive",
++ .data = &sched_interactive,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++ {
++ .procname = "iso_cpu",
++ .data = &sched_iso_cpu,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one_hundred,
++ },
++ {
++ .procname = "yield_type",
++ .data = &sched_yield_type,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++#endif
++ {
++ .procname = "hrtimer_granularity_us",
++ .data = &hrtimer_granularity_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
++ {
++ .procname = "hrtimeout_min_us",
++ .data = &hrtimeout_min_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
+diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+index 16c027e9cc73..37162d7bd922 100644
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -198,8 +198,9 @@ int clockevents_tick_resume(struct clock_event_device *dev)
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
+-/* Limit min_delta to a jiffie */
+-#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
++int __read_mostly hrtimer_granularity_us = 100;
++/* Limit min_delta to 100us */
++#define MIN_DELTA_LIMIT (hrtimer_granularity_us * NSEC_PER_USEC)
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 23788100e214..e215dbe39946 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1974,3 +1974,117 @@ int __sched schedule_hrtimeout(ktime_t *expires,
+ return schedule_hrtimeout_range(expires, 0, mode);
+ }
+ EXPORT_SYMBOL_GPL(schedule_hrtimeout);
++
++/*
++ * As per schedule_hrtimeout but takes a millisecond value and returns how
++ * many milliseconds are left.
++ */
++long __sched schedule_msec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ int delta, jiffs;
++ ktime_t expires;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ jiffs = msecs_to_jiffies(timeout);
++ /*
++ * If regular timer resolution is adequate or hrtimer resolution is not
++ * (yet) better than Hz, as would occur during startup, use regular
++ * timers.
++ */
++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
++ return schedule_timeout(jiffs);
++
++ delta = (timeout % 1000) * NSEC_PER_MSEC;
++ expires = ktime_set(0, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_ms(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++EXPORT_SYMBOL(schedule_msec_hrtimeout);
++
++#define USECS_PER_SEC 1000000
++extern int hrtimer_granularity_us;
++
++static inline long schedule_usec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ ktime_t expires;
++ int delta;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ return schedule_timeout(usecs_to_jiffies(timeout));
++
++ if (timeout < hrtimer_granularity_us)
++ timeout = hrtimer_granularity_us;
++ delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
++ expires = ktime_set(0, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_us(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++int __read_mostly hrtimeout_min_us = 1000;
++
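++/*
++ * Sleep for the minimum high resolution timeout (hrtimeout_min_us,
++ * default 1000us, tunable via sysctl) and return the remainder in
++ * jiffies.
++ */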
++long __sched schedule_min_hrtimeout(void)
++{
++ return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
++}
++
++EXPORT_SYMBOL(schedule_min_hrtimeout);
++
++long __sched schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ __set_current_state(TASK_INTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
++
++long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 2541bd89f20e..31506b34cc2a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -830,7 +830,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ tsk_expires->virt_exp = expires;
+
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+
+ /*
+ * Check for the special case thread timers.
+@@ -840,7 +840,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -852,7 +852,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+@@ -1096,7 +1096,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ struct task_cputime task_sample;
+
+ task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
++ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 4a4fd567fb26..a5f11b8d8386 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -44,6 +44,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/freezer.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -1479,7 +1480,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
+ * Check, if the next hrtimer event is before the next timer wheel
+ * event:
+ */
+-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
++static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
+ {
+ u64 nextevt = hrtimer_get_next_event();
+
+@@ -1497,6 +1498,9 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
+ if (nextevt <= basem)
+ return basem;
+
++ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
++ base->is_idle = false;
++
+ /*
+ * Round up to the next jiffie. High resolution timers are
+ * off, so the hrtimers are expired in the tick and we need to
+@@ -1566,7 +1570,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ }
+ raw_spin_unlock(&base->lock);
+
+- return cmp_next_hrtimer_event(basem, expires);
++ return cmp_next_hrtimer_event(base, basem, expires);
+ }
+
+ /**
+@@ -1795,6 +1799,18 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ expire = timeout + jiffies;
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ /*
++ * Special case 1 as being a request for the minimum timeout
++ * and use highres timers to timeout after 1ms to workaround
++ * the granularity of low Hz tick timers.
++ */
++ if (!schedule_min_hrtimeout())
++ return 0;
++ goto out_timeout;
++ }
++#endif
+ timer.task = current;
+ timer_setup_on_stack(&timer.timer, process_timeout, 0);
+ __mod_timer(&timer.timer, expire, 0);
+@@ -1803,10 +1819,10 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ /* Remove the timer from the object tracker */
+ destroy_timer_on_stack(&timer.timer);
+-
++out_timeout:
+ timeout = expire - jiffies;
+
+- out:
++out:
+ return timeout < 0 ? 0 : timeout;
+ }
+ EXPORT_SYMBOL(schedule_timeout);
+@@ -1947,7 +1963,19 @@ void __init init_timers(void)
+ */
+ void msleep(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ /*
++ * Use high resolution timers where the resolution of tick based
++ * timers is inadequate.
++ */
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
++ while (msecs)
++ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
++ return;
++ }
++ timeout = jiffs + 1;
+
+ while (timeout)
+ timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1961,7 +1989,15 @@ EXPORT_SYMBOL(msleep);
+ */
+ unsigned long msleep_interruptible(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
++ while (msecs && !signal_pending(current))
++ msecs = schedule_msec_hrtimeout_interruptible(msecs);
++ return msecs;
++ }
++ timeout = jiffs + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
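
Both msleep() variants above lean on one calling convention: the hrtimeout helpers return the milliseconds still outstanding, and the caller loops until nothing is left (or, in the interruptible variant, a signal arrives). A stand-in stub makes the shape of that loop concrete; fake_hrtimeout() is hypothetical and only mimics the return contract.

    #include <stdio.h>

    /* Hypothetical stand-in for schedule_msec_hrtimeout_uninterruptible():
     * pretend each call "sleeps" 3 ms and report what remains. */
    static unsigned int fake_hrtimeout(unsigned int msecs)
    {
        return msecs > 3 ? msecs - 3 : 0;
    }

    int main(void)
    {
        unsigned int msecs = 10;

        while (msecs)                      /* same loop shape as msleep() */
            msecs = fake_hrtimeout(msecs);
        puts("slept the full 10 ms across several re-arms");
        return 0;
    }
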
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 11e9daa4a568..4c4e1d5bdf42 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1041,10 +1041,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_MUQSS
++ /* No deadline on MuQSS, use RR */
++ .sched_policy = SCHED_RR,
++#else
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
++#endif
+ };
+ struct wakeup_test_data *x = data;
+
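
MuQSS does not implement SCHED_DEADLINE, hence the selftest's fallback to SCHED_RR. The equivalent policy switch is reachable from userspace through the portable API; a minimal sketch (needs root or CAP_SYS_NICE, otherwise EPERM):

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        struct sched_param sp = { .sched_priority = 1 };

        if (sched_setscheduler(0, SCHED_RR, &sp) == -1)
            perror("sched_setscheduler");
        else
            puts("running under SCHED_RR, the policy MuQSS substitutes");
        return 0;
    }
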
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index c43ec2271469..77d699f14af7 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -1016,6 +1016,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ }
+ return nelems;
+ }
++EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+ /*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
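
The swiotlb hunk only adds an export, but its effect shows up at module load time: without the EXPORT_SYMBOL() line, a module referencing the function fails to insert with "Unknown symbol swiotlb_map_sg_attrs". A fragment of a hypothetical module illustrating this (kernel build context assumed; not standalone-runnable):

    #include <linux/module.h>
    #include <linux/swiotlb.h>

    static int __init demo_init(void)
    {
            /* any reference is enough to require the export at load time */
            pr_info("swiotlb export demo: %pS\n", swiotlb_map_sg_attrs);
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
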
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index cd5dc3faaa57..97e1a0ebf656 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -149,7 +149,7 @@ struct scan_control {
+ /*
+ * From 0 .. 100. Higher means more swappy.
+ */
+-int vm_swappiness = 60;
++int vm_swappiness = 33;
+ /*
+ * The total number of pages which are beyond the high watermark within all
+ * zones.
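
Dropping vm_swappiness from 60 to 33 changes only the compiled-in default, biasing reclaim away from swapping anonymous pages; the knob remains tunable at runtime through /proc/sys/vm/swappiness (or sysctl vm.swappiness). A quick check of the live value:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/vm/swappiness", "r");
        int v;

        if (f && fscanf(f, "%d", &v) == 1)
            printf("vm.swappiness = %d (ck default: 33, mainline: 60)\n", v);
        if (f)
            fclose(f);
        return 0;
    }
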
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index b8ab5c829511..d07e1acae483 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1901,7 +1901,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
+ mutex_unlock(&pktgen_thread_lock);
+ pr_debug("%s: waiting for %s to disappear....\n",
+ __func__, ifname);
+- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
++ schedule_msec_hrtimeout_interruptible((msec_per_try));
+ mutex_lock(&pktgen_thread_lock);
+
+ if (++i >= max_tries) {
+diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
+index 8f20dec97843..944ce63431b0 100644
+--- a/sound/pci/maestro3.c
++++ b/sound/pci/maestro3.c
+@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(0, io + GPIO_DATA);
+ outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
++ schedule_msec_hrtimeout_uninterruptible((delay1));
+
+ outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
+ udelay(5);
+@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
+ outw(~0, io + GPIO_MASK);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
++ schedule_msec_hrtimeout_uninterruptible((delay2));
+
+ if (! snd_m3_try_read_vendor(chip))
+ break;
+diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
+index 55b04c55fb4b..2ed02ad6ac41 100644
+--- a/sound/soc/codecs/rt5631.c
++++ b/sound/soc/codecs/rt5631.c
+@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+ /* config one-bit depop parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
+ snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
+@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ /* config depop sequence parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
+diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
+index fc79c6725d06..f5017b46b2db 100644
+--- a/sound/soc/codecs/wm8350.c
++++ b/sound/soc/codecs/wm8350.c
+@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
+ out2->ramp == WM8350_RAMP_UP) {
+ /* delay is longer over 0dB as increases are larger */
+ if (i >= WM8350_OUTn_0dB)
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (2));
+ else
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (1));
+ } else
+ udelay(50); /* doesn't matter if we delay longer */
+@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ (platform->dis_out4 << 6));
+
+ /* wait for discharge */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ cap_discharge_msecs));
+
+@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ WM8350_VBUFEN);
+
+ /* wait for vmid */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_charge_msecs));
+
+@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_discharge_msecs));
+
+@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ pm1 | WM8350_OUTPUT_DRAIN_EN);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->drain_msecs));
+
+ pm1 &= ~WM8350_BIASEN;
+diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
+index c77b49a29311..fc50456e90a9 100644
+--- a/sound/soc/codecs/wm8900.c
++++ b/sound/soc/codecs/wm8900.c
+@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
+ /* Need to let things settle before stopping the clock
+ * to ensure that restart works, see "Stopping the
+ * master clock" in the datasheet. */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_write(codec, WM8900_REG_POWER2,
+ WM8900_REG_POWER2_SYSCLK_ENA);
+ break;
+diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
+index df7220656d98..93aaf7fae51e 100644
+--- a/sound/soc/codecs/wm9713.c
++++ b/sound/soc/codecs/wm9713.c
+@@ -203,7 +203,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
+
+ /* Gracefully shut down the voice interface. */
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
+ snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
+
+@@ -872,7 +872,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
+ wm9713->pll_in = freq_in;
+
+ /* wait 10ms AC97 link frames for the link to stabilise */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ return 0;
+ }
+
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 92894d9cac19..fc10bd070706 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
+ static void pop_wait(u32 pop_time)
+ {
+ if (pop_time)
+- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
++ schedule_msec_hrtimeout_uninterruptible((pop_time));
+ }
+
+ static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index b3854f8c0c67..d6bd68b381f3 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
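
Every sound/* hunk from maestro3 through line6, plus the pktgen one, is the same mechanical substitution: a jiffy-granular sleep becomes an hrtimer-backed sleep that takes milliseconds directly. Side by side (kernel-internal calls, shown for comparison only):

    /* before: rounds up to whole jiffies, >= 10 ms per unit at HZ=100 */
    schedule_timeout_uninterruptible(msecs_to_jiffies(10));

    /* after: hrtimer-backed helper takes milliseconds directly */
    schedule_msec_hrtimeout_uninterruptible(10);

    /* loops that ticked one jiffy at a time get the minimum variant */
    schedule_min_hrtimeout();   /* replaces schedule_timeout(1) */

The doubled parentheses in the converted calls, e.g. schedule_msec_hrtimeout_uninterruptible((10)), are harmless leftovers of textually replacing msecs_to_jiffies(10) and do not change the argument.
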
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index e6acc281dd37..9a307c14f6b6 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -31,7 +31,7 @@ INCLUDES := -I$(srctree)/tools/include \
+ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
+ WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+-CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
++CFLAGS += -Wall $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+ LDFLAGS += -lelf $(LIBSUBCMD)
+
+ # Allow old libelf to be used:
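
The objtool change simply stops promoting warnings to errors: objtool is built with the host toolchain, and a newer host GCC that introduces new warnings would otherwise turn a kernel build into a hard failure. Presumably that is the motivation here; the $(WARNINGS) set itself is left intact.
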
diff --git a/regdom-hack-4.16.6.patch b/regdom-hack-4.16.6.patch
new file mode 100644
index 0000000..76a8db5
--- /dev/null
+++ b/regdom-hack-4.16.6.patch
@@ -0,0 +1,63 @@
+--- a/drivers/net/wireless/ath/Kconfig
++++ b/drivers/net/wireless/ath/Kconfig
+@@ -22,6 +22,13 @@
+
+ if WLAN_VENDOR_ATH
+
++config ATH_USER_REGD
++ bool "Do not enforce EEPROM regulatory restrictions"
++ ---help---
++	  Say Y if you want to ignore the EEPROM regulatory restrictions
++ in order to change the wireless region from userspace.
++
++
+ config ATH_DEBUG
+ bool "Atheros wireless debugging"
+ ---help---
+--- a/drivers/net/wireless/ath/regd.c
++++ b/drivers/net/wireless/ath/regd.c
+@@ -345,6 +345,10 @@
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
++#ifdef CONFIG_ATH_USER_REGD
++ return;
++#endif
++
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+@@ -378,6 +382,10 @@
+ {
+ struct ieee80211_supported_band *sband;
+
++#ifdef CONFIG_ATH_USER_REGD
++ return;
++#endif
++
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ return;
+@@ -407,6 +415,10 @@
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
++#ifdef CONFIG_ATH_USER_REGD
++ return;
++#endif
++
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
+ return;
+
+@@ -639,6 +651,11 @@
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
++
++#ifdef CONFIG_ATH_USER_REGD
++ return 0;
++#endif
++
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
+ REGULATORY_CUSTOM_REG;
+
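
With CONFIG_ATH_USER_REGD=y, each patched helper returns before applying the EEPROM-derived channel restrictions, and the wiphy-initialisation path returns early instead of setting REGULATORY_STRICT_REG | REGULATORY_CUSTOM_REG, so the cfg80211 user regulatory domain (for example one set with iw reg set) governs the radio, exactly as the Kconfig help text describes.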