Synchronize code for OnePlus 8T Oxygen OS 11.0.8.11.KB05AA, OnePlus 8 Oxygen OS 11.0.5.5.IN21AA, and OnePlus 8 Pro Oxygen OS 11.0.5.5.IN11AA

Change-Id: Ib6789497152a88c7684cf1f80404327b8bccbd86
Author: yu.huang, 2021-04-21 23:21:01 +08:00 (committed by yu huang)
parent 2ee2431c39
commit 2cb46758ae
573 changed files with 3799 additions and 107529 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 113
SUBLEVEL = 110
EXTRAVERSION =
NAME = "People's Front"
@ -860,7 +860,7 @@ LD_FLAGS_LTO_CLANG := -mllvm -import-instr-limit=5
KBUILD_LDFLAGS += $(LD_FLAGS_LTO_CLANG)
KBUILD_LDFLAGS_MODULE += $(LD_FLAGS_LTO_CLANG)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/scripts/module-lto.lds
KBUILD_LDS_MODULE += $(srctree)/scripts/module-lto.lds
# allow disabling only clang LTO where needed
DISABLE_LTO_CLANG := -fno-lto


@ -1,4 +1,26 @@
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY_NOT_OEM),y)
dtbo-$(CONFIG_ARCH_KONA) += \
kona-cdp-overlay.dtbo \
kona-cdp-lcd-overlay.dtbo \
kona-mtp-overlay.dtbo \
kona-mtp-ws-overlay.dtbo \
kona-xr-overlay.dtbo \
kona-rumi-overlay.dtbo \
kona-qrd-overlay.dtbo \
kona-xrfusion-overlay.dtbo \
kona-hdk-overlay.dtbo
kona-cdp-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-cdp-lcd-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-mtp-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-mtp-ws-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-xr-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-rumi-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-qrd-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-xrfusion-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kona-hdk-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
else
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
dtbo-$(CONFIG_ARCH_KONA) += \
instantnoodlep-overlay-evb.dtbo \
instantnoodlep-overlay-t0.dtbo \
@ -26,6 +48,32 @@ instantnoodlev-overlay-evt1.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
instantnoodlev-overlay-dvt.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
kebab-overlay.dtbo-base := kona.dtb kona-v2.dtb kona-v2.1.dtb
else
dtb-$(CONFIG_ARCH_KONA) += kona-rumi.dtb \
kona-mtp.dtb \
kona-mtp-ws.dtb \
kona-xr.dtb \
kona-xrfusion.dtb \
kona-cdp.dtb \
kona-cdp-lcd.dtb \
kona-qrd.dtb \
kona-v2-rumi.dtb \
kona-v2-mtp.dtb \
kona-v2-mtp-ws.dtb \
kona-v2-cdp.dtb \
kona-v2-qrd.dtb \
kona-v2-xrfusion.dtb \
kona-hdk.dtb \
kona-v2.1-mtp.dtb \
kona-v2.1-mtp-ws.dtb \
kona-v2.1-cdp.dtb \
kona-v2.1-qrd.dtb \
kona-v2.1-hdk.dtb \
kona-v2.1-xrfusion.dtb \
qrb5165-iot-rb5.dtb \
kona-v2.1-iot-rb5.dtb
endif
endif


@ -57,7 +57,6 @@
qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-status-value = <0x9C>;
qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-lp11-init;
qcom,mdss-bl-high2bit;
qcom,mdss-loading-effect;
@ -332,24 +331,6 @@
39 01 00 00 00 00 02 C2 27
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-dimming-setting-mode-1-command = [
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 06
39 01 00 00 00 00 02 B7 06
39 01 00 00 00 00 02 B0 05
39 01 00 00 00 00 02 B7 93
39 01 00 00 00 00 03 F0 A5 A5
39 01 00 00 00 00 02 53 28
];
qcom,mdss-dsi-dimming-setting-mode-0-command = [
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 06
39 01 00 00 00 00 02 B7 01
39 01 00 00 00 00 02 B0 05
39 01 00 00 00 00 02 B7 13
39 01 00 00 00 00 03 F0 A5 A5
39 01 00 00 00 00 02 53 20
];
qcom,mdss-dsi-seed-command = [
39 01 00 00 00 00 02 81 90
39 01 00 00 00 00 03 F0 5A 5A
@ -366,17 +347,30 @@
39 01 00 00 00 00 02 B1 00
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-panel-register-read-command = [
06 01 00 00 00 00 01 DA
];
qcom,mdss-dsi-panel-level2-key-enable-command = [
39 01 00 00 00 00 03 F0 5A 5A
];
qcom,mdss-dsi-panel-gamma-change-write-command = [
39 01 00 00 00 00 03 F0 5A 5A
15 01 00 00 00 00 02 B0 AE
39 01 00 00 00 00 09 B9 0C E3 C4 E5 0D A3 D1 01
15 01 00 00 00 00 02 B0 83
39 01 00 00 00 00 09 B9 0E 24 1C FB 0E D4 39 0D
15 01 00 00 00 00 02 B0 58
39 01 00 00 00 00 09 B9 0F A4 81 19 10 24 99 25
15 01 00 00 00 00 02 B0 2D
39 01 00 00 00 00 09 B9 0F 94 C9 2C 10 84 F1 3A
15 01 00 00 00 00 02 B0 DD
39 01 00 00 00 00 09 B9 0E 73 C5 08 10 24 05 28
15 01 00 00 00 00 02 F7 0F
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-panel-level2-key-disable-command = [
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-timing-switch-command-state = "dsi_hs_mode";
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
@ -401,14 +395,12 @@
qcom,mdss-dsi-panel-hbm-off-aod-on-command-state = "dsi_hs_mode";
qcom,mdss-dsi-loading-effect-enable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-loading-effect-disable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-dimming-setting-mode-1-command-state = "dsi_lp_mode";
qcom,mdss-dsi-dimming-setting-mode-0-command-state = "dsi_lp_mode";
qcom,mdss-dsi-seed-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-register-read-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-level2-key-enable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-level2-key-disable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-gamma-change-write-command-state = "dsi_lp_mode";
qcom,panel-roi-alignment = <540 30 540 30 540 30>;
qcom,lm-split = <540 540>;
qcom,compression-mode = "dsc";
@ -678,24 +670,6 @@
39 01 00 00 00 00 02 C2 27
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-dimming-setting-mode-1-command = [
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 06
39 01 00 00 00 00 02 B7 06
39 01 00 00 00 00 02 B0 05
39 01 00 00 00 00 02 B7 93
39 01 00 00 00 00 03 F0 A5 A5
39 01 00 00 00 00 02 53 28
];
qcom,mdss-dsi-dimming-setting-mode-0-command = [
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 06
39 01 00 00 00 00 02 B7 01
39 01 00 00 00 00 02 B0 05
39 01 00 00 00 00 02 B7 13
39 01 00 00 00 00 03 F0 A5 A5
39 01 00 00 00 00 02 53 20
];
qcom,mdss-dsi-seed-command = [
39 01 00 00 00 00 02 81 90
39 01 00 00 00 00 03 F0 5A 5A
@ -712,17 +686,30 @@
39 01 00 00 00 00 02 B1 00
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-panel-register-read-command = [
06 01 00 00 00 00 01 DA
];
qcom,mdss-dsi-panel-level2-key-enable-command = [
39 01 00 00 00 00 03 F0 5A 5A
];
qcom,mdss-dsi-panel-gamma-change-write-command = [
39 01 00 00 00 00 03 F0 5A 5A
15 01 00 00 00 00 02 B0 AE
39 01 00 00 00 00 09 B9 0C E3 C4 E5 0D A3 D1 01
15 01 00 00 00 00 02 B0 83
39 01 00 00 00 00 09 B9 0E 24 1C FB 0E D4 39 0D
15 01 00 00 00 00 02 B0 58
39 01 00 00 00 00 09 B9 0F A4 81 19 10 24 99 25
15 01 00 00 00 00 02 B0 2D
39 01 00 00 00 00 09 B9 0F 94 C9 2C 10 84 F1 3A
15 01 00 00 00 00 02 B0 DD
39 01 00 00 00 00 09 B9 0E 73 C5 08 10 24 05 28
15 01 00 00 00 00 02 F7 0F
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-panel-level2-key-disable-command = [
39 01 00 00 00 00 03 F0 A5 A5
];
qcom,mdss-dsi-timing-switch-command-state = "dsi_hs_mode";
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
@ -747,14 +734,12 @@
qcom,mdss-dsi-panel-hbm-off-aod-on-command-state = "dsi_hs_mode";
qcom,mdss-dsi-loading-effect-enable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-loading-effect-disable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-dimming-setting-mode-1-command-state = "dsi_lp_mode";
qcom,mdss-dsi-dimming-setting-mode-0-command-state = "dsi_lp_mode";
qcom,mdss-dsi-seed-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-register-read-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-level2-key-enable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-level2-key-disable-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-gamma-change-write-command-state = "dsi_lp_mode";
qcom,panel-roi-alignment = <540 30 540 30 540 30>;
qcom,lm-split = <540 540>;
qcom,compression-mode = "dsc";


@ -169,11 +169,6 @@
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 08
39 01 00 00 00 00 02 F2 A0
39 01 00 00 00 00 03 F0 A5 A5
/* Fast Discharge */
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 03
39 01 00 00 00 00 02 DD 40
39 01 00 00 6E 00 03 F0 A5 A5
/* T2M CLK fix Setting */
39 01 00 00 00 00 03 F0 5A 5A
@ -518,11 +513,6 @@
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 08
39 01 00 00 00 00 02 F2 A0
39 01 00 00 00 00 03 F0 A5 A5
/* Fast Discharge */
39 01 00 00 00 00 03 F0 5A 5A
39 01 00 00 00 00 02 B0 03
39 01 00 00 00 00 02 DD 40
39 01 00 00 6E 00 03 F0 A5 A5
/* T2M CLK fix Setting */
39 01 00 00 00 00 03 F0 5A 5A


@ -15,7 +15,7 @@
compatible = "qcom,kona-mtp", "qcom,kona", "qcom,mtp";
qcom,board-id = <8 0>;
/*we can add project id to this array,uefi can auto read it,if new project,we add to this array */
oem,project-id = <19821 19855>;
oem,project-id = <19821 19855 19868>;
/*we can add hw id to this array,uefi can auto read it,if new hw,we add to this array */
oem,hw-id = <14 15 55>;
};


@ -101,7 +101,6 @@
};
};
/* @bsp, Battery & Charging config STRAT */
&qupv3_se16_i2c {
oneplus_fastchg@26{
op,4320mAh_4p45_support;
@ -201,9 +200,7 @@
&kona_mtp_batterydata {
#include "OP-fg-batterydata-4320mah.dtsi"
};
/* @bsp, Battery & Charging config EDN */
/* @bsp, 2019/09/24 usb config START*/
&usb2_phy0 {
qcom,param-override-seq =
<0x67 0x6c/*Disconnection voltage +21.56%*/
@ -362,4 +359,3 @@
status = "disabled";
};
};
/* @bsp, 2019/09/24 usb config END*/


@ -1,5 +1,4 @@
/*this is for different project dtsi*/
/* @bsp, 2019/09/10 Wireless Charging porting STRAT */
&op_wlchg {
status = "okay";
op,max-voltage-mv = <4550>;
@ -63,9 +62,9 @@
/* fod parameter*/
op,fastchg-fod-enable;
op,fastchg-match-q = /bits/ 8 <0x44>;
op,fastchg-fod-parm = /bits/ 8 <0xac 0x32 0xac 0x28 0xa0 0x1e
0x9a 0x37 0x9a 0x32 0x9f 0xc4>;
op,fastchg-match-q = /bits/ 8 <0x56>;
op,fastchg-fod-parm = /bits/ 8 <0xac 0x32 0xac 0x28 0xa0 0xba
0x9a 0xd3 0x9a 0xce 0x9f 0xf6>;
op,fastchg-fod-parm-startup = /bits/ 8 <0xac 0x7f 0xac 0x28 0xa0 0x1e
0x9a 0x37 0x9a 0x32 0x9f 0xc4>;
@ -104,7 +103,7 @@
otg_en-gpio = <&pm8150l_gpios 4 0x00>;
vbus-gpio = <&tlmm 172 0x00>;
};
/* @bsp, 2019/09/10 Wireless Charging porting END */
&qupv3_se13_i2c {
status = "ok";
sec-s6sy761@48 {
@ -223,9 +222,7 @@
&kona_mtp_batterydata {
#include "OP-fg-batterydata-4510mah.dtsi"
};
/* @bsp, Battery & Charging config END */
/* @bsp, 2019/10/08 usb config START*/
&usb2_phy0 {
qcom,param-override-seq =
<0x67 0x6c/*Disconnection voltage +21.56%*/
@ -386,4 +383,3 @@
status = "disabled";
};
};
/* @bsp, 2019/10/08 usb config END*/


@ -1,7 +1,6 @@
/*this is for different project dtsi*/
#include "instantnoodle.dtsi"
#include "kona-thermal.dtsi"
/* @bsp, Battery & Charging config STRAT */
&qupv3_se16_i2c {
oneplus_fastchg@26{
op,4300mAh_4p45_support;
@ -102,7 +101,6 @@
&kona_mtp_batterydata {
#include "OP-fg-batterydata-4300mah.dtsi"
};
/* @bsp, Battery & Charging config STRAT */
/delete-node/ &skin_therm_19821;
/delete-node/ &msm_therm_19821;
@ -116,29 +114,29 @@
wake-capable-sensor;
trips {
skin_therm_usr_trip0: skin_therm_usr_trip0 {
temperature = <50000>;
hysteresis = <7000>;
type = "passive";
};
skin_therm_usr_trip1: skin_therm_usr_trip1 {
temperature = <54000>;
hysteresis = <9000>;
type = "passive";
};
skin_therm_usr_trip3: skin_therm_usr_trip3 {
temperature = <58000>;
hysteresis = <7000>;
type = "passive";
};
skin_therm_usr_trip1: skin_therm_usr_trip1 {
temperature = <61000>;
hysteresis = <8000>;
type = "passive";
};
skin_therm_usr_trip3: skin_therm_usr_trip3 {
temperature = <65000>;
hysteresis = <8000>;
type = "passive";
};
};
cooling-maps {
modem_Tj_lvl1 {
modem_skin_lvl0 {
trip = <&skin_therm_usr_trip0>;
cooling-device = <&modem_tj 1 1>;
cooling-device = <&modem_skin 1 1>;
};
modem_skin_lvl1 {
trip = <&skin_therm_usr_trip1>;
cooling-device = <&modem_skin 1 1>;
cooling-device = <&modem_skin 2 2>;
};
modem_skin_lvl3 {
trip = <&skin_therm_usr_trip3>;
@ -165,7 +163,7 @@
type = "passive";
};
mmw_pa1_usr_trip2: mmw_pa1_usr_trip2 {
temperature = <52000>;
temperature = <100000>;
hysteresis = <5000>;
type = "passive";
};
@ -204,7 +202,7 @@
type = "passive";
};
xo_therm_usr_trip2: xo_therm_usr_trip2 {
temperature = <60000>;
temperature = <100000>;
hysteresis = <5000>;
type = "passive";
};
@ -244,7 +242,7 @@
type = "passive";
};
modem_mmw2_usr_trip2: modem_mmw2_usr_trip2{
temperature = <60000>;
temperature = <100000>;
hysteresis = <5000>;
type = "passive";
};


@ -564,7 +564,7 @@
oem_serial_pinctrl {
compatible = "oem,oem_serial_pinctrl";
pinctrl-names = "uart_pinctrl_active","uart_pinctrl_deactive";
pinctrl-0 = <&qupv3_se12_2uart_active>;
pinctrl-0 = <&qupv3_se12_2uart_oem_sleep>;
pinctrl-1 = <&qupv3_se12_2uart_oem_sleep>;
};
@ -667,6 +667,7 @@
config {
pins = "gpio34", "gpio35";
drive-strength = <2>;
input-enable;
bias-pull-down;
};
};
@ -736,7 +737,7 @@
};
};
/* @bsp, 2019/07/08 Battery & Charging porting STRAT */
&qupv3_se16_i2c {
qcom,clk-freq-out = <100000>;
status = "ok";
@ -1229,7 +1230,6 @@
};
};
};
/* @bsp, 2019/07/08 Battery & Charging porting END */
&spmi_bus {
qcom,pm8150@0 {
@ -1284,10 +1284,7 @@
qca,bt-vdd-rfa1-voltage-level = <1900000 2040000>;
};
/* @bsp, usb config START */
/* @bsp, As QRD-DVT have this config, keep the same config
* for ldo18 power suspend
*/
&usb_qmp_dp_phy {
vdd-supply = <&pm8150_l18>;
qcom,vdd-voltage-level = <0 912000 912000>;
@ -1302,7 +1299,7 @@
};
};
};
/* @bsp, usb config END */
&wdog{
qcom,bark-time = <15000>;


@ -17,6 +17,6 @@
/*we can add project id to this array,uefi can auto read it,if new project,we add to this array */
oem,project-id = <19805 20809>;
/*we can add hw id to this array,uefi can auto read it,if new hw,we add to this array */
oem,hw-id = <11 12 13 14 15 51 52 53 54 55>;
oem,hw-id = <11 12 13 14 15 21 22 51 52 53 54 55>;
};


@ -203,7 +203,7 @@
};
};
};
/* @bsp, Battery & Charging config STRAT */
&qupv3_se16_i2c {
oneplus_fastchg@0a{
op,swarp_supported;
@ -312,9 +312,8 @@
&kona_mtp_batterydata {
#include "OP-fg-batterydata-4500mah.dtsi"
};
/* @bsp, Battery & Charging config EDN */
/* @bsp, 2019/09/24 usb config START*/
&usb2_phy0 {
qcom,param-override-seq =
<0x67 0x6c/*Disconnection voltage +21.56%*/
@ -478,4 +477,3 @@
status = "disabled";
};
};
/* @bsp, 2019/09/24 usb config END*/


@ -586,16 +586,9 @@
skin-therm-usr {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-governor = "user_space";
thermal-governor = "step_wise";
thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
wake-capable-sensor;
trips {
active-config0 {
temperature = <125000>;
hysteresis = <1000>;
type = "passive";
};
};
};
mmw-pa1-usr {


@ -793,7 +793,6 @@
};
};
/* @bsp, 2019/07/08 Battery & Charging porting STRAT */
&qupv3_se16_i2c {
qcom,clk-freq-out = <100000>;
status = "ok";
@ -1138,9 +1137,7 @@
};
};
};
/* @bsp, 2019/07/08 Battery & Charging porting END */
/* @bsp, 2019/08/30 Wireless Charging porting STRAT */
&soc {
op_wlchg:oneplus_wlchg {
status = "disable";
@ -1572,7 +1569,6 @@
};
};
};
/* @bsp, 2019/08/30 Wireless Charging porting END */
&spmi_bus {
qcom,pm8150@0 {
@ -1623,10 +1619,7 @@
qca,bt-vdd-rfa1-voltage-level = <1900000 2040000>;
};
/* @bsp, usb config START */
/* @bsp, As QRD-DVT have this config, keep the same config
* for ldo18 power suspend
*/
&usb_qmp_dp_phy {
vdd-supply = <&pm8150_l18>;
qcom,vdd-voltage-level = <0 912000 912000>;
@ -1641,7 +1634,6 @@
};
};
};
/* @bsp, usb config END */
&wdog{
qcom,bark-time = <15000>;


@ -77,9 +77,7 @@ CONFIG_ARM_SCPI_PROTOCOL=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_JUMP_LABEL=y
CONFIG_LTO_CLANG=y
CONFIG_CFI_CLANG=y
CONFIG_SHADOW_CALL_STACK=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@ -313,10 +311,6 @@ CONFIG_MEDIA_CONTROLLER=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_HRTIMER=y


@ -356,7 +356,6 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_QTI_HAPTICS=y
@ -731,6 +730,7 @@ CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_DEFRAG=y
CONFIG_FSC=y
CONFIG_OP_FREEZER=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y


@ -370,7 +370,6 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_QTI_HAPTICS=y
@ -756,7 +755,6 @@ CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
@ -814,6 +812,7 @@ CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_DEFRAG=y
CONFIG_FSC=y
CONFIG_OP_FREEZER=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM4X=y


@ -32,10 +32,6 @@ extern void __cpu_copy_user_page(void *to, const void *from,
extern void copy_page(void *to, const void *from);
extern void clear_page(void *to);
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)


@ -970,29 +970,11 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
/*
* The number of CPUs online, not counting this CPU (which may not be
* fully online and so not counted in num_online_cpus()).
*/
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());
return num_online_cpus() - this_cpu_online;
}
static inline unsigned int num_other_active_cpus(void)
{
unsigned int this_cpu_active = cpu_active(smp_processor_id());
return num_active_cpus() - this_cpu_active;
}
void smp_send_stop(void)
{
unsigned long timeout;
if (num_other_online_cpus()) {
if (num_online_cpus() > 1) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
@ -1005,10 +987,10 @@ void smp_send_stop(void)
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while (num_other_active_cpus() && timeout--)
while (num_active_cpus() > 1 && timeout--)
udelay(1);
if (num_other_active_cpus())
if (num_active_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
@ -1031,11 +1013,7 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
/*
* If this cpu is the only one alive at this point in time, online or
* not, there are no stop messages to be sent around, so just back out.
*/
if (num_other_online_cpus() == 0) {
if (num_online_cpus() == 1) {
sdei_mask_local_cpu();
return;
}
@ -1043,7 +1021,7 @@ void crash_smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);


@ -399,26 +399,13 @@ static phys_addr_t pgd_pgtable_alloc(void)
return __pa(ptr);
}
/**
* create_pgtable_mapping - create a pagetable mapping for given
* physical start and end addresses.
* @start: physical start address.
* @end: physical end address.
*/
void create_pgtable_mapping(phys_addr_t start, phys_addr_t end)
{
unsigned long virt = (unsigned long)phys_to_virt(start);
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&start, virt);
return;
}
__create_pgd_mapping(init_mm.pgd, start, virt, end - start,
PAGE_KERNEL, NULL, 0);
}
EXPORT_SYMBOL_GPL(create_pgtable_mapping);
/*
* This function can only be used to modify existing table entries,


@ -525,13 +525,12 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/
entity = &bfqg->entity;
for_each_entity(entity) {
struct bfq_group *curr_bfqg = container_of(entity,
struct bfq_group, entity);
if (curr_bfqg != bfqd->root_group) {
parent = bfqg_parent(curr_bfqg);
bfqg = container_of(entity, struct bfq_group, entity);
if (bfqg != bfqd->root_group) {
parent = bfqg_parent(bfqg);
if (!parent)
parent = bfqd->root_group;
bfq_group_set_parent(curr_bfqg, parent);
bfq_group_set_parent(bfqg, parent);
}
}


@ -58,14 +58,12 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
}
#endif
static bool acpi_no_watchdog;
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
if (acpi_disabled || acpi_no_watchdog)
if (acpi_disabled)
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
@ -93,14 +91,6 @@ bool acpi_has_watchdog(void)
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
/* ACPI watchdog can be disabled on boot command line */
static int __init disable_acpi_watchdog(char *str)
{
acpi_no_watchdog = true;
return 1;
}
__setup("acpi_no_watchdog", disable_acpi_watchdog);
void __init acpi_watchdog_init(void)
{
const struct acpi_wdat_entry *entries;


@ -201,7 +201,7 @@ static int ghes_estatus_pool_expand(unsigned long len)
* New allocation must be visible in all pgd before it can be found by
* an NMI allocating from the pool.
*/
vmalloc_sync_mappings();
vmalloc_sync_all();
return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
}


@ -243,7 +243,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_work {
struct list_head entry;
enum {
enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
BINDER_WORK_RETURN_ERROR,
@ -903,27 +903,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
return w;
}
/**
* binder_dequeue_work_head() - Dequeues the item at head of list
* @proc: binder_proc associated with list
* @list: list to dequeue head
*
* Removes the head of the list if there are items on the list
*
* Return: pointer dequeued binder_work, NULL if list was empty
*/
static struct binder_work *binder_dequeue_work_head(
struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
binder_inner_proc_lock(proc);
w = binder_dequeue_work_head_ilocked(list);
binder_inner_proc_unlock(proc);
return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
@ -4672,13 +4651,17 @@ static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
enum binder_work_type wtype;
while (1) {
w = binder_dequeue_work_head(proc, list);
binder_inner_proc_lock(proc);
w = binder_dequeue_work_head_ilocked(list);
wtype = w ? w->type : 0;
binder_inner_proc_unlock(proc);
if (!w)
return;
switch (w->type) {
switch (wtype) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@ -4712,9 +4695,11 @@ static void binder_release_work(struct binder_proc *proc,
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
case BINDER_WORK_NODE:
break;
default:
pr_err("unexpected work type, %d, not freed\n",
w->type);
wtype);
break;
}
}
@ -6093,6 +6078,7 @@ int binder_state_show(struct seq_file *m, void *unused)
}
#ifdef CONFIG_OP_FREEZER
// add for op freeze manager
static void op_freezer_check_uid_proc_status(struct binder_proc *proc)
{
struct rb_node *n = NULL;


@ -118,7 +118,7 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
if (link->flags == DL_FLAG_SYNC_STATE_ONLY)
continue;
if (link->consumer == target)
@ -131,50 +131,6 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
}
static void device_link_init_status(struct device_link *link,
struct device *consumer,
struct device *supplier)
{
switch (supplier->links.status) {
case DL_DEV_PROBING:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* A consumer driver can create a link to a supplier
* that has not completed its probing yet as long as it
* knows that the supplier is already functional (for
* example, it has just acquired some resources from the
* supplier).
*/
link->status = DL_STATE_CONSUMER_PROBE;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
break;
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
@ -191,7 +147,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
if (link->flags == DL_FLAG_SYNC_STATE_ONLY)
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@ -219,14 +175,6 @@ void device_pm_move_to_tail(struct device *dev)
device_links_read_unlock(idx);
}
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
@ -241,38 +189,14 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
* If DL_FLAG_STATELESS is set in @flags, the caller of this function is
* expected to release the link returned by it directly with the help of either
* device_link_del() or device_link_remove().
* If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
* automatically when the consumer device driver unbinds from it. Analogously,
* if DL_FLAG_AUTOREMOVE_SUPPLIER is set in @flags, the link will be removed
* automatically when the supplier device driver unbinds from it.
*
* If that flag is not set, however, the caller of this function is handing the
* management of the link over to the driver core entirely and its return value
* can only be used to check whether or not the link is present. In that case,
* the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
* flags can be used to indicate to the driver core when the link can be safely
* deleted. Namely, setting one of them in @flags indicates to the driver core
* that the link is not going to be used (by the given caller of this function)
* after unbinding the consumer or supplier driver, respectively, from its
* device, so the link can be deleted at that point. If none of them is set,
* the link will be maintained until one of the devices pointed to by it (either
* the consumer or the supplier) is unregistered.
*
* Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
* DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
* managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
* be used to request the driver core to automaticall probe for a consmer
* driver after successfully binding a driver to the supplier device.
*
* The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
* DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
* the same time is invalid and will cause NULL to be returned upfront.
* However, if a device link between the given @consumer and @supplier pair
* exists already when this function is called for them, the existing link will
* be returned regardless of its current type and status (the link's flags may
* be modified then). The caller of this function is then expected to treat
* the link as though it has just been created, so (in particular) if
* DL_FLAG_STATELESS was passed in @flags, the link needs to be released
* explicitly when not needed any more (as stated above).
* The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
* or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
* will cause NULL to be returned upfront.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@ -288,13 +212,11 @@ struct device_link *device_link_add(struct device *consumer,
{
struct device_link *link;
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
if (!consumer || !supplier ||
(flags & DL_FLAG_SYNC_STATE_ONLY &&
flags != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
(flags & DL_FLAG_STATELESS &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER)))
return NULL;
if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
@ -304,9 +226,6 @@ struct device_link *device_link_add(struct device *consumer,
}
}
if (!(flags & DL_FLAG_STATELESS))
flags |= DL_FLAG_MANAGED;
device_links_write_lock();
device_pm_lock();
@ -324,18 +243,21 @@ struct device_link *device_link_add(struct device *consumer,
goto out;
}
/*
* DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
* longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
* together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer != consumer)
continue;
if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
link = NULL;
goto out;
}
if (flags & DL_FLAG_AUTOREMOVE_CONSUMER)
link->flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
@ -345,42 +267,13 @@ struct device_link *device_link_add(struct device *consumer,
refcount_inc(&link->rpm_active);
}
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
!(link->flags & DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
goto out;
}
}
kref_get(&link->kref);
/*
* If the life time of the link following from the new flags is
* longer than indicated by the flags of the existing link,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
if (!(link->flags & DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
}
goto out;
}
@ -407,25 +300,37 @@ struct device_link *device_link_add(struct device *consumer,
kref_init(&link->kref);
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS)
if (flags & DL_FLAG_STATELESS) {
link->status = DL_STATE_NONE;
else
device_link_init_status(link, consumer, supplier);
} else {
switch (supplier->links.status) {
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
if (flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
/*
* Some callers expect the link creation during consumer driver probe to
* resume the supplier even without DL_FLAG_RPM_ACTIVE.
*/
if (link->status == DL_STATE_CONSUMER_PROBE &&
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
if (flags & DL_FLAG_SYNC_STATE_ONLY) {
dev_dbg(consumer,
"Linked as a sync state only consumer to %s\n",
dev_name(supplier));
goto out;
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
if (flags & DL_FLAG_SYNC_STATE_ONLY)
goto out;
reorder:
/*
* Move the consumer and all of the devices depending on it to the end
@ -567,16 +472,8 @@ static void __device_link_del(struct kref *kref)
}
#endif /* !CONFIG_SRCU */
static void device_link_put_kref(struct device_link *link)
{
if (link->flags & DL_FLAG_STATELESS)
kref_put(&link->kref, __device_link_del);
else
WARN(1, "Unable to drop a managed device link reference\n");
}
/**
* device_link_del - Delete a stateless link between two devices.
* device_link_del - Delete a link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
@ -588,14 +485,14 @@ void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
device_link_put_kref(link);
kref_put(&link->kref, __device_link_del);
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
/**
* device_link_remove - Delete a stateless link between two devices.
* device_link_remove - remove a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
*
@ -614,7 +511,7 @@ void device_link_remove(void *consumer, struct device *supplier)
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
device_link_put_kref(link);
kref_put(&link->kref, __device_link_del);
break;
}
}
@ -647,7 +544,7 @@ static void device_links_missing_supplier(struct device *dev)
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
@ -669,7 +566,7 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED) ||
if (link->flags & DL_FLAG_STATELESS ||
link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
@ -713,7 +610,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@ -823,7 +720,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
*
* Also change the status of @dev's links to suppliers to "active".
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
@ -842,24 +739,11 @@ void device_links_driver_bound(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
* Links created during consumer probe may be in the "consumer
* probe" state to start with if the supplier is still probing
* when they are created and they may become "active" if the
* consumer probe returns first. Skip them here.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
driver_deferred_probe_add(link->consumer);
}
if (defer_sync_state_count)
@ -868,7 +752,7 @@ void device_links_driver_bound(struct device *dev)
__device_links_queue_sync_state(dev, &sync_list);
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
@ -888,13 +772,6 @@ void device_links_driver_bound(struct device *dev)
device_links_flush_sync_list(&sync_list, dev);
}
static void device_link_drop_managed(struct device_link *link)
{
link->flags &= ~DL_FLAG_MANAGED;
WRITE_ONCE(link->status, DL_STATE_NONE);
kref_put(&link->kref, __device_link_del);
}
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a drvier.
@ -905,58 +782,28 @@ static void device_link_drop_managed(struct device_link *link)
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
device_link_drop_managed(link);
else if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
kref_put(&link->kref, __device_link_del);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
dev->links.status = DL_DEV_NO_DRIVER;
}
/**
* device_links_no_driver - Update links after failing driver probe.
* @dev: Device whose driver has just failed to probe.
*
* Clean up leftover links to consumers for @dev and invoke
* %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_no_driver(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
* The probe has failed, so if the status of the link is
* "consumer probe" or "active", it must have been added by
* a probing consumer while this device was still probing.
* Change its state to "dormant", as it represents a valid
* relationship, but it is not functionally meaningful.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
__device_links_no_driver(dev);
device_links_write_unlock();
@ -970,7 +817,7 @@ void device_links_no_driver(struct device *dev)
* invoke %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
@ -979,7 +826,7 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
@ -992,7 +839,7 @@ void device_links_driver_cleanup(struct device *dev)
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
device_link_drop_managed(link);
kref_put(&link->kref, __device_link_del);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@ -1015,7 +862,7 @@ void device_links_driver_cleanup(struct device *dev)
*
* Return 'false' if there are no probing or active consumers.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
@ -1025,7 +872,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@ -1055,7 +902,7 @@ bool device_links_busy(struct device *dev)
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
@ -1067,7 +914,7 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
if (!(link->flags & DL_FLAG_MANAGED) ||
if (link->flags & DL_FLAG_STATELESS ||
link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;


@ -116,7 +116,7 @@ static void deferred_probe_work_func(struct work_struct *work)
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
void driver_deferred_probe_add(struct device *dev)
static void driver_deferred_probe_add(struct device *dev)
{
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {


@ -1531,7 +1531,7 @@ void pm_runtime_remove(struct device *dev)
* runtime PM references to the device, drop the usage counter of the device
* (as many times as needed).
*
* Links with the DL_FLAG_MANAGED flag unset are ignored.
* Links with the DL_FLAG_STATELESS flag set are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
@ -1548,7 +1548,7 @@ void pm_runtime_clean_up_links(struct device *dev)
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
if (link->flags & DL_FLAG_STATELESS)
continue;
while (refcount_dec_not_one(&link->rpm_active))


@ -82,6 +82,22 @@ static struct wakeup_source deleted_ws = {
static void ws_printk(struct work_struct *work);
static DECLARE_DELAYED_WORK(ws_printk_work, ws_printk);
/**
* wakeup_source_prepare - Prepare a new wakeup source for initialization.
* @ws: Wakeup source to prepare.
* @name: Pointer to the name of the new wakeup source.
*
* Callers must ensure that the @name string won't be freed when @ws is still in
* use.
*/
void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
{
if (ws) {
memset(ws, 0, sizeof(*ws));
ws->name = name;
}
}
EXPORT_SYMBOL_GPL(wakeup_source_prepare);
static DEFINE_IDA(wakeup_ida);
/**


@ -271,11 +271,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
* bounce the buffer due to global resource outage.
*/
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
if (err == -ENOMEM || err == -ENOSPC)
return BLK_STS_DEV_RESOURCE;


@ -675,8 +675,6 @@ fw_load_ee_pthru:
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_CNTRL_ERR("MHI did not enter BHIE\n");
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
MHI_CB_BOOTUP_TIMEOUT);
goto error_read;
}
@ -686,11 +684,6 @@ fw_load_ee_pthru:
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
if (ret) {
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
MHI_CB_BOOTUP_TIMEOUT);
}
MHI_CNTRL_LOG("amss fw_load ret:%d\n", ret);
release_firmware(firmware);


@ -216,6 +216,8 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
db = ring->iommu_base + (ring->wp - ring->base);
*ring->ctxt_wp = db;
mhi_write_db(mhi_cntrl, ring->db_addr, db);
smp_wmb();
}
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
@ -1940,8 +1942,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
return 0;
error_dec_pendpkt:
if (in_mission_mode)
atomic_dec(&mhi_cntrl->pending_pkts);
atomic_dec(&mhi_cntrl->pending_pkts);
error_pm_state:
if (!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);


@ -252,7 +252,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
u32 reset = 1, ready = 0;
struct mhi_event *mhi_event;
enum MHI_PM_STATE cur_state;
int ret = -EIO, i;
int ret, i;
MHI_CNTRL_LOG("Waiting to enter READY state\n");
@ -270,13 +270,11 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
/* device enter into error state */
if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state))
goto error_ready;
return -EIO;
/* device did not transition to ready state */
if (reset || !ready) {
ret = -ETIMEDOUT;
goto error_ready;
}
if (reset || !ready)
return -ETIMEDOUT;
MHI_CNTRL_LOG("Device in READY State\n");
write_lock_irq(&mhi_cntrl->pm_lock);
@ -288,7 +286,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
MHI_CNTRL_ERR("Error moving to state %s from %s\n",
to_mhi_pm_state_str(MHI_PM_POR),
to_mhi_pm_state_str(cur_state));
goto error_ready;
return -EIO;
}
read_lock_bh(&mhi_cntrl->pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
@ -297,7 +295,6 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
ret = mhi_init_mmio(mhi_cntrl);
if (ret) {
MHI_CNTRL_ERR("Error programming mmio registers\n");
ret = -EIO;
goto error_mmio;
}
@ -329,11 +326,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
error_mmio:
read_unlock_bh(&mhi_cntrl->pm_lock);
error_ready:
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
MHI_CB_BOOTUP_TIMEOUT);
return ret;
return -EIO;
}
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
@ -815,7 +808,6 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
MHI_LOG("%s state to :%s\n", __func__,
TO_MHI_STATE_TRANS_STR(item->state));
queue_work(mhi_cntrl->wq, &mhi_cntrl->st_worker);
return 0;
}
@ -1145,14 +1137,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
return 0;
MHI_ERR("MHI did not reach mission mode within %d ms\n",
mhi_cntrl->timeout_ms);
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
MHI_CB_BOOTUP_TIMEOUT);
return -ETIMEDOUT;
return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(mhi_sync_power_up);


@ -799,20 +799,12 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
unsigned long dma_attr)
{
int err = 0;
struct fastrpc_apps *me = &gfa;
if (me->dev == NULL) {
pr_err("device adsprpc-mem is not initialized\n");
return -ENODEV;
}
VERIFY(err, size > 0 && size < me->max_size_limit);
if (err) {
err = -EFAULT;
pr_err("adsprpc: %s: invalid allocation size 0x%zx\n",
__func__, size);
return err;
}
*vaddr = dma_alloc_attrs(me->dev, size, region_phys,
GFP_KERNEL, dma_attr);
if (IS_ERR_OR_NULL(*vaddr)) {
@ -884,9 +876,11 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
}
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
map->refs--;
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&me->hlock);
if (map->refs > 0)
return;
} else {
@ -1841,10 +1835,9 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
PERF_END);
for (i = bufs; i < bufs + handles; ++i) {
struct fastrpc_mmap *map = ctx->maps[i];
if (map) {
pages[i].addr = map->phys;
pages[i].size = map->size;
}
pages[i].addr = map->phys;
pages[i].size = map->size;
}
fdlist = (uint64_t *)&pages[bufs + handles];
crclist = (uint32_t *)&fdlist[M_FDLIST];


@ -4044,9 +4044,9 @@ static int __init clk_debug_init(void)
if (!d)
return -ENOMEM;
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);
inited = 1;
mutex_unlock(&clk_debug_lock);


@ -543,15 +543,3 @@ config QM_DEBUGCC_SCUBA
Support for the debug clock controller on Qualcomm Technologies, Inc
SCUBA devices.
Say Y if you want to support the clock measurement functionality.
config CLOCK_CPU_OSM_660
tristate "OSM CPU Clock Controller for SDM660"
depends on COMMON_CLK_QCOM
help
Support for the OSM clock controller for SDM660.
Operating State Manager (OSM) is a hardware engine used by some
Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency and
voltage scaling in hardware. OSM is capable of controlling
frequency and voltage requests for multiple clusters via the
existence of multiple OSM domains.
Say Y if you want to support OSM clocks.


@ -19,7 +19,6 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
# Keep alphabetically sorted by config
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLOCK_CPU_OSM_660) += clk-cpu-osm-660.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o


@ -918,13 +918,8 @@ static int alpha_pll_huayra_determine_rate(struct clk_hw *hw,
{
unsigned long rrate, prate;
u32 l, a;
struct clk_hw *parent_hw;
parent_hw = clk_hw_get_parent(hw);
if (!parent_hw)
return -EINVAL;
prate = clk_hw_get_rate(parent_hw);
prate = clk_hw_get_rate(clk_hw_get_parent(hw));
rrate = alpha_huayra_pll_round_rate(req->rate, prate, &l, &a);
req->best_parent_hw = clk_hw_get_parent(hw);

File diff suppressed because it is too large.


@ -1336,8 +1336,6 @@ static int clk_gfx3d_src_determine_rate(struct clk_hw *hw,
int ret;
xo = clk_hw_get_parent_by_index(hw, 0);
if (!xo)
return -EINVAL;
if (req->rate == clk_hw_get_rate(xo)) {
req->best_parent_hw = xo;
req->best_parent_rate = req->rate;
@ -1345,9 +1343,7 @@ static int clk_gfx3d_src_determine_rate(struct clk_hw *hw,
}
f = qcom_find_freq(rcg->freq_tbl, req->rate);
if (!f)
return -EINVAL;
else if (req->rate != f->freq)
if (!f || (req->rate != f->freq))
req->rate = f->freq;
/* Indexes of source from the parent map */


@ -184,7 +184,7 @@ static struct clk_alpha_pll npu_cc_pll1 = {
/* 250MHz Configuration */
static struct alpha_pll_config npu_q6ss_pll_config = {
.l = 0xD,
.cal_l = 0x3F,
.cal_l = 0x1E,
.alpha = 0x555,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002067,
@ -319,12 +319,12 @@ static struct clk_rcg2 npu_cc_xo_clk_src = {
};
static const struct freq_tbl ftbl_npu_dsp_core_clk_src[] = {
F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0),
F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
{ }
};


@ -505,8 +505,6 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
base_freq = c->reg_bases[REG_FREQ_LUT_TABLE];
base_volt = c->reg_bases[REG_VOLT_LUT_TABLE];
prev_cc = 0;
for (i = 0; i < lut_max_entries; i++) {
data = readl_relaxed(base_freq + i * lut_row_size);
src = (data & GENMASK(31, 30)) >> 30;


@ -396,28 +396,21 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
return PTR_ERR(name);
mutex_lock(&dmabuf->lock);
spin_lock(&dmabuf->name_lock);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
goto out_unlock;
}
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
out_unlock:
spin_unlock(&dmabuf->name_lock);
mutex_unlock(&dmabuf->lock);
return ret;
}
static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
enum dma_data_direction direction);
static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
enum dma_data_direction direction);
static long dma_buf_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@ -628,6 +621,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
@ -652,7 +646,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->file = file;
mutex_init(&dmabuf->lock);
spin_lock_init(&dmabuf->name_lock);
INIT_LIST_HEAD(&dmabuf->attachments);
dma_buf_ref_init(dmabuf);
@ -1056,7 +1049,6 @@ static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
return ret;
}
int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
enum dma_data_direction direction,
unsigned int offset, unsigned int len)
@ -1079,7 +1071,7 @@ int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
EXPORT_SYMBOL(dma_buf_begin_cpu_access_partial);
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@ -1107,7 +1099,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
int ret = 0;
@ -1134,7 +1126,7 @@ int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
EXPORT_SYMBOL(dma_buf_end_cpu_access_partial);
/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
@ -1301,7 +1293,7 @@ int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
int ret = 0;
if (WARN_ON(!dmabuf) || !flags)
if (WARN_ON(!dmabuf))
return -EINVAL;
if (dmabuf->ops->get_flags)
@ -1309,7 +1301,7 @@ int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_get_flags);
EXPORT_SYMBOL(dma_buf_get_flags);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)


@ -564,6 +564,21 @@ static int mdm_subsys_ramdumps(int want_dumps,
return 0;
}
static void mdm_force_reset(const struct subsys_desc *mdm_subsys)
{
struct esoc_clink *esoc_clink =
container_of(mdm_subsys,
struct esoc_clink,
subsys);
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
esoc_mdm_log("[OEM] MDM force reset\n");
if (mdm->pon_ops->soft_reset)
mdm->pon_ops->soft_reset(mdm, true);
}
static int mdm_register_ssr(struct esoc_clink *esoc_clink)
{
struct subsys_desc *subsys = &esoc_clink->subsys;
@ -571,6 +586,7 @@ static int mdm_register_ssr(struct esoc_clink *esoc_clink)
subsys->shutdown = mdm_subsys_shutdown;
subsys->ramdump = mdm_subsys_ramdumps;
subsys->powerup = mdm_subsys_powerup;
subsys->force_reset = mdm_force_reset;
subsys->crash_shutdown = mdm_crash_shutdown;
return esoc_clink_register_ssr(esoc_clink);
}


@ -32,7 +32,7 @@
#define DEF_RAMDUMP_DELAY 2000
#define DEF_SHUTDOWN_TIMEOUT 10000
#define DEF_MDM9X55_RESET_TIME 203
#define RD_BUF_SIZE 100
#define RD_BUF_SIZE 256
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
#define MDM_DBG_OFFSET 0x934


@ -139,16 +139,13 @@ static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
return -EIO;
if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@ -175,16 +172,13 @@ static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
return -EIO;
str += sprintf(str, "0x%lx\n", var->DataSize);
@ -195,15 +189,12 @@ static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
int ret;
if (!entry || !buf)
return -EINVAL;
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
return -EIO;
memcpy(buf, var->Data, var->DataSize);
@ -272,9 +263,6 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
u8 *data;
int err;
if (!entry || !buf)
return -EINVAL;
if (is_compat()) {
struct compat_efi_variable *compat;
@ -326,16 +314,14 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
struct compat_efi_variable *compat;
unsigned long datasize = sizeof(var->Data);
size_t size;
int ret;
if (!entry || !buf)
return 0;
ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
var->DataSize = datasize;
if (ret)
var->DataSize = 1024;
if (efivar_entry_get(entry, &entry->var.Attributes,
&entry->var.DataSize, entry->var.Data))
return -EIO;
if (is_compat()) {


@ -45,7 +45,39 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
struct efi_runtime_work efi_rts_work;
/* efi_runtime_service() function identifiers */
enum efi_rts_ids {
GET_TIME,
SET_TIME,
GET_WAKEUP_TIME,
SET_WAKEUP_TIME,
GET_VARIABLE,
GET_NEXT_VARIABLE,
SET_VARIABLE,
QUERY_VARIABLE_INFO,
GET_NEXT_HIGH_MONO_COUNT,
UPDATE_CAPSULE,
QUERY_CAPSULE_CAPS,
};
/*
* efi_runtime_work: Details of EFI Runtime Service work
* @arg<1-5>: EFI Runtime Service function arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
*/
struct efi_runtime_work {
void *arg1;
void *arg2;
void *arg3;
void *arg4;
void *arg5;
efi_status_t status;
struct work_struct work;
enum efi_rts_ids efi_rts_id;
struct completion efi_rts_comp;
};
/*
* efi_queue_work: Queue efi_runtime_service() and wait until it's done
@ -59,10 +91,11 @@ struct efi_runtime_work efi_rts_work;
*/
#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
({ \
struct efi_runtime_work efi_rts_work; \
efi_rts_work.status = EFI_ABORTED; \
\
init_completion(&efi_rts_work.efi_rts_comp); \
INIT_WORK(&efi_rts_work.work, efi_call_rts); \
INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
efi_rts_work.arg1 = _arg1; \
efi_rts_work.arg2 = _arg2; \
efi_rts_work.arg3 = _arg3; \
@ -158,16 +191,18 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
*/
static void efi_call_rts(struct work_struct *work)
{
struct efi_runtime_work *efi_rts_work;
void *arg1, *arg2, *arg3, *arg4, *arg5;
efi_status_t status = EFI_NOT_FOUND;
arg1 = efi_rts_work.arg1;
arg2 = efi_rts_work.arg2;
arg3 = efi_rts_work.arg3;
arg4 = efi_rts_work.arg4;
arg5 = efi_rts_work.arg5;
efi_rts_work = container_of(work, struct efi_runtime_work, work);
arg1 = efi_rts_work->arg1;
arg2 = efi_rts_work->arg2;
arg3 = efi_rts_work->arg3;
arg4 = efi_rts_work->arg4;
arg5 = efi_rts_work->arg5;
switch (efi_rts_work.efi_rts_id) {
switch (efi_rts_work->efi_rts_id) {
case GET_TIME:
status = efi_call_virt(get_time, (efi_time_t *)arg1,
(efi_time_cap_t *)arg2);
@ -225,8 +260,8 @@ static void efi_call_rts(struct work_struct *work)
*/
pr_err("Requested executing invalid EFI Runtime Service.\n");
}
efi_rts_work.status = status;
complete(&efi_rts_work.efi_rts_comp);
efi_rts_work->status = status;
complete(&efi_rts_work->efi_rts_comp);
}
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)


@ -364,7 +364,8 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_type =
uint8_t grph_obj_type=
grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;


@ -694,11 +694,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
if (size > 4096 || size & 3 || *pos & 3)
if (size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
offset = *pos & GENMASK_ULL(11, 0);
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@ -729,7 +729,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
value = data[result >> 2];
value = data[offset++];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;


@ -419,7 +419,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);


@ -684,8 +684,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
swath_bytes_horz_wc = width * blk256_height * bpe;
swath_bytes_vert_wc = height * blk256_width * bpe;
swath_bytes_horz_wc = height * blk256_height * bpe;
swath_bytes_vert_wc = width * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */


@ -1364,34 +1364,28 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
}
} else {
frame.colorimetry = HDMI_COLORIMETRY_NONE;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
}
frame.scan_mode = HDMI_SCAN_MODE_NONE;


@ -545,12 +545,10 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
DRM_DEBUG_LEASE("Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
if (IS_ERR(lessee)) {
ret = PTR_ERR(lessee);
idr_destroy(&leases);
goto out_leases;
}
@ -585,6 +583,7 @@ out_lessee:
out_leases:
put_unused_fd(fd);
idr_destroy(&leases);
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;


@ -1722,9 +1722,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_info(dev, "failed to get regulators: %d\n", ret);
return ret;
dev_info(dev, "failed to get regulators: %d\n", ret);
return -EPROBE_DEFER;
}
dsi->clks = devm_kcalloc(dev,
@ -1737,10 +1736,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
dsi->clks[i] = devm_clk_get(dev,
OLD_SCLK_MIPI_CLK_NAME);
if (!IS_ERR(dsi->clks[i]))
continue;
strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
i--;
continue;
}
dev_info(dev, "failed to get the clock: %s\n",


@ -272,17 +272,10 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
* service if no active vgpu.
*/
mutex_lock(&gvt->lock);
idr_remove(&gvt->vgpu_idr, vgpu->id);
mutex_unlock(&gvt->lock);
mutex_lock(&vgpu->vgpu_lock);
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
@ -297,6 +290,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);
mutex_lock(&gvt->lock);
idr_remove(&gvt->vgpu_idr, vgpu->id);
if (idr_is_empty(&gvt->vgpu_idr))
intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);


@ -506,18 +506,10 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
unsigned int pipe)
struct drm_plane *primary,
struct drm_plane *cursor, unsigned int pipe)
{
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
for (i = 0; i < mtk_crtc->layer_nr; i++) {
if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
primary = &mtk_crtc->planes[i];
else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
cursor = &mtk_crtc->planes[i];
}
int ret;
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@ -630,7 +622,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
goto unprepare;
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
NULL, pipe);
if (ret < 0)
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);


@ -1464,7 +1464,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a702 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_size = SZ_128K,
.busy_mask = 0xfffffffe,
.bus_width = 16,
.bus_width = 32,
},
.prim_fifo_threshold = 0x0000c000,
.sqefw_name = "a702_sqe.fw",


@ -150,8 +150,7 @@ static void a6xx_init(struct adreno_device *adreno_dev)
/* LP DDR4 highest bank bit is different and needs to be overridden */
if (adreno_is_a650(adreno_dev) && of_fdt_get_ddrtype() == 0x7)
adreno_dev->highest_bank_bit = 15;
else if ((adreno_is_a610(adreno_dev) || adreno_is_a702(adreno_dev))
&& of_fdt_get_ddrtype() == 0x5) {
else if (adreno_is_a610(adreno_dev) && of_fdt_get_ddrtype() == 0x5) {
/*
* LPDDR3 has multiple different highest bank bit value
* based on different DDR density. Query this value from


@ -387,12 +387,6 @@ static const unsigned int a6xx_gmu_wrapper_registers[] = {
0x1f840, 0x1f840, 0x1f844, 0x1f845, 0x1f887, 0x1f889,
/* GMU AO*/
0x23b0C, 0x23b0E, 0x23b15, 0x23b15,
/* GPU CC */
0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
0x26000, 0x26002,
};
enum a6xx_debugbus_id {
@ -1825,12 +1819,11 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
adreno_snapshot_registers(device, snapshot,
a6xx_rscc_snapshot_registers,
ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
}
if (!gmu_core_isenabled(device))
} else if (adreno_is_a610(adreno_dev) || adreno_is_a702(adreno_dev)) {
adreno_snapshot_registers(device, snapshot,
a6xx_gmu_wrapper_registers,
ARRAY_SIZE(a6xx_gmu_wrapper_registers) / 2);
}
sptprac_on = gpudev->sptprac_is_on(adreno_dev);


@ -1755,6 +1755,26 @@ void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
}
}
static void kgsl_send_uevent_cmd_notify(struct kgsl_device *desc, int contextId,
int timestamp)
{
char *envp[4];
char *title = "GPU_CMD_PAGE_FAULT";
if (!desc)
return;
envp[0] = kasprintf(GFP_KERNEL, "title=%s", title);
envp[1] = kasprintf(GFP_KERNEL, "cntId=%d", contextId);
envp[2] = kasprintf(GFP_KERNEL, "timestamp=%d", timestamp);
envp[3] = NULL;
kobject_uevent_env(&desc->dev->kobj, KOBJ_CHANGE, envp);
kfree(envp[0]);
kfree(envp[1]);
kfree(envp[2]);
}
/**
* process_cmdobj_fault() - Process a cmdobj for fault policies
* @device: Device on which the cmdobj caused a fault
@ -1926,6 +1946,8 @@ static void process_cmdobj_fault(struct kgsl_device *device,
pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
state, drawobj->context->id, drawobj->timestamp);
kgsl_send_uevent_cmd_notify(device, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
mark_guilty_context(device, drawobj->context->id);


@ -2808,7 +2808,7 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
return 0;
unmap:
if (param->type == KGSL_USER_MEM_TYPE_DMABUF) {
if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) {
kgsl_destroy_ion(entry->priv_data);
entry->memdesc.sgt = NULL;
}
@ -3122,7 +3122,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
return result;
error_attach:
switch (memtype) {
switch (kgsl_memdesc_usermem_type(&entry->memdesc)) {
case KGSL_MEM_ENTRY_ION:
kgsl_destroy_ion(entry->priv_data);
entry->memdesc.sgt = NULL;
@ -4481,6 +4481,8 @@ kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE)
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
if (memdesc->size != vma_size) {
dev_err(device->dev,
"memstore bad size: %d should be %llu\n",
@ -4823,23 +4825,25 @@ static void kgsl_send_uevent_notify(struct kgsl_device *desc, char *comm,
unsigned long len, unsigned long total_vm,
unsigned long largest_gap_cpu, unsigned long largest_gap_gpu)
{
char *envp[6];
char *envp[7];
char *title = "GPU_VM";
if (!desc)
return;
envp[0] = kasprintf(GFP_KERNEL, "COMM=%s", comm);
envp[1] = kasprintf(GFP_KERNEL, "LEN=%lu", len);
envp[2] = kasprintf(GFP_KERNEL, "TOTAL_VM=%lu", total_vm);
envp[3] = kasprintf(GFP_KERNEL, "LARGEST_GAP_CPU=%lu", largest_gap_cpu);
envp[4] = kasprintf(GFP_KERNEL, "LARGEST_GAP_GPU=%lu", largest_gap_gpu);
envp[5] = NULL;
envp[0] = kasprintf(GFP_KERNEL, "title=%s", title);
envp[1] = kasprintf(GFP_KERNEL, "COMM=%s", comm);
envp[2] = kasprintf(GFP_KERNEL, "LEN=%lu", len);
envp[3] = kasprintf(GFP_KERNEL, "TOTAL_VM=%lu", total_vm);
envp[4] = kasprintf(GFP_KERNEL, "LARGEST_GAP_CPU=%lu", largest_gap_cpu);
envp[5] = kasprintf(GFP_KERNEL, "LARGEST_GAP_GPU=%lu", largest_gap_gpu);
envp[6] = NULL;
kobject_uevent_env(&desc->dev->kobj, KOBJ_CHANGE, envp);
kfree(envp[4]);
kfree(envp[3]);
kfree(envp[2]);
kfree(envp[1]);
kfree(envp[0]);
kfree(envp[1]);
kfree(envp[2]);
kfree(envp[3]);
kfree(envp[4]);
kfree(envp[5]);
}
static int current_pid = -1;


@ -737,6 +737,25 @@ static struct kgsl_process_private *kgsl_iommu_get_process(u64 ptbase)
return NULL;
}
static void kgsl_send_uevent_iommu_notify(struct kgsl_device *desc, char *fault_type,
const char *context_name)
{
char *envp[4];
char *title = "GPU_IOMMU_PAGE_FAULT";
if (!desc)
return;
envp[0] = kasprintf(GFP_KERNEL, "title=%s", title);
envp[1] = kasprintf(GFP_KERNEL, "fault_type=%s", fault_type);
envp[2] = kasprintf(GFP_KERNEL, "context=%s", context_name);
envp[3] = NULL;
kobject_uevent_env(&desc->dev->kobj, KOBJ_CHANGE, envp);
kfree(envp[0]);
kfree(envp[1]);
kfree(envp[2]);
}
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct device *dev, unsigned long addr, int flags, void *token)
{
@ -757,7 +776,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
unsigned int no_page_fault_log = 0;
char *fault_type = "unknown";
char *comm = "unknown";
bool fault_ret_flag = false;
struct kgsl_process_private *private;
static DEFINE_RATELIMIT_STATE(_rs,
@ -786,6 +804,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
private = kgsl_iommu_get_process(ptbase);
kgsl_send_uevent_iommu_notify(device, fault_type, ctx->name);
if (private) {
pid = private->pid;
comm = private->comm;
@ -801,11 +821,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
* Turn off GPU IRQ so we don't get faults from it too.
* The device mutex must be held to change power state
*/
if (mutex_trylock(&device->mutex)) {
kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
mutex_unlock(&device->mutex);
} else
fault_ret_flag = true;
mutex_lock(&device->mutex);
kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
mutex_unlock(&device->mutex);
}
contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
@ -864,8 +882,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
dev_err(ctx->kgsldev->dev, "*EMPTY*\n");
}
}
if (fault_ret_flag)
return ret;
/*
@ -2426,6 +2442,22 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
return addr;
}
static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt,
u64 gpuaddr, u64 size)
{
if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) &&
((gpuaddr + size) > pt->compat_va_start &&
(gpuaddr + size) <= pt->compat_va_end))
return true;
if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) &&
((gpuaddr + size) > pt->svm_start &&
(gpuaddr + size) <= pt->svm_end))
return true;
return false;
}
static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr, uint64_t size)
{
@ -2433,9 +2465,7 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
struct kgsl_iommu_pt *pt = pagetable->priv;
struct rb_node *node;
/* Make sure the requested address doesn't fall in the global range */
if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
if (!iommu_addr_in_svm_ranges(pt, gpuaddr, size))
return -ENOMEM;
spin_lock(&pagetable->lock);


@ -343,8 +343,7 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);


@ -1425,6 +1425,14 @@ static void hid_output_field(const struct hid_device *hid,
}
}
static size_t hid_compute_report_size(struct hid_report *report)
{
if (report->size)
return ((report->size - 1) >> 3) + 1;
return 0;
}
/*
* Create a report. 'data' has to be allocated using
* hid_alloc_report_buf() so that it has proper size.
@ -1437,7 +1445,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
memset(data, 0, ((report->size - 1) >> 3) + 1);
memset(data, 0, hid_compute_report_size(report));
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
@ -1564,7 +1572,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
csize--;
}
rsize = ((report->size - 1) >> 3) + 1;
rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;


@ -124,8 +124,6 @@ static const struct hid_device_id hammer_devices[] = {
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,


@ -468,7 +468,6 @@
#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f


@ -1125,6 +1125,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
if (!bit)
return;
if (device->driver->input_mapped &&
device->driver->input_mapped(device, hidinput, field, usage,
&bit, &max) < 0) {


@ -841,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
if (!*bit)
return -1;
input_set_capability(hi->input, EV_KEY, code);
return 1;


@ -341,14 +341,6 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Trekstor SURFBOOK E11B",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Direkt-Tek DTLAPY116-2",
.matches = {


@ -27,7 +27,7 @@ static struct tmc_drvdata *tmcdrvdata;
static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
size_t bytes, size_t *len, char **bufp)
{
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
size_t actual;
if (*len >= bytes)
@ -60,7 +60,7 @@ static irqreturn_t etr_handler(int irq, void *data)
static void tmc_etr_flush_bytes(loff_t *ppos, size_t bytes, size_t *len)
{
uint32_t rwp = 0;
dma_addr_t paddr = tmcdrvdata->sysfs_buf->hwaddr;
dma_addr_t paddr = tmcdrvdata->etr_buf->hwaddr;
rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
@ -200,7 +200,7 @@ int usb_bypass_start(struct byte_cntr *byte_cntr_data)
}
atomic_set(&byte_cntr_data->usb_free_buf, USB_BUF_NUM);
byte_cntr_data->offset = tmc_sg_get_rwp_offset(tmcdrvdata);
byte_cntr_data->offset = tmcdrvdata->etr_buf->offset;
byte_cntr_data->read_active = true;
/*
* IRQ is a '8- byte' counter and to observe interrupt at
@ -321,7 +321,7 @@ static int usb_transfer_small_packet(struct qdss_request *usb_req,
struct byte_cntr *drvdata, size_t *small_size)
{
int ret = 0;
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
size_t req_size, actual;
long w_offset;
@ -346,8 +346,7 @@ static int usb_transfer_small_packet(struct qdss_request *usb_req,
drvdata->usb_req = usb_req;
req_size -= actual;
if ((drvdata->offset + actual) >=
tmcdrvdata->sysfs_buf->size)
if ((drvdata->offset + actual) >= tmcdrvdata->size)
drvdata->offset = 0;
else
drvdata->offset += actual;
@ -384,9 +383,8 @@ static void usb_read_work_fn(struct work_struct *work)
{
int ret, i, seq = 0;
struct qdss_request *usb_req = NULL;
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
size_t actual, req_size, req_sg_num, small_size = 0;
size_t actual_total = 0;
char *buf;
struct byte_cntr *drvdata =
container_of(work, struct byte_cntr, read_work);
@ -417,7 +415,6 @@ static void usb_read_work_fn(struct work_struct *work)
req_size = USB_BLK_SIZE - small_size;
small_size = 0;
actual_total = 0;
if (req_size > 0) {
seq++;
@ -458,14 +455,13 @@ static void usb_read_work_fn(struct work_struct *work)
sg_mark_end(&usb_req->sg[i]);
if ((drvdata->offset + actual) >=
tmcdrvdata->sysfs_buf->size)
tmcdrvdata->size)
drvdata->offset = 0;
else
drvdata->offset += actual;
actual_total += actual;
}
usb_req->length = actual_total;
usb_req->length = req_size;
drvdata->usb_req = usb_req;
usb_req->num_sgs = i;


@ -303,7 +303,6 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
pm_runtime_put(dev);
ret = 0;
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))


@ -45,9 +45,6 @@ struct etr_perf_buffer {
/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
/* SW USB reserved memory size */
#define TMC_ETR_SW_USB_BUF_SIZE SZ_32M
/*
* The TMC ETR SG has a page size of 4K. The SG table contains pointers
* to 4KB buffers. However, the OS may use a PAGE_SIZE different from
@ -1091,12 +1088,7 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
&& drvdata->byte_cntr->sw_usb)
return tmc_alloc_etr_buf(drvdata, TMC_ETR_SW_USB_BUF_SIZE,
0, cpu_to_node(0), NULL);
else
return tmc_alloc_etr_buf(drvdata, drvdata->size,
return tmc_alloc_etr_buf(drvdata, drvdata->size,
0, cpu_to_node(0), NULL);
}
@ -1455,9 +1447,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
|| !drvdata->usbch) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM ||
(drvdata->byte_cntr->sw_usb &&
drvdata->out_mode == TMC_ETR_OUT_MODE_USB)) {
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
/*
* ETR DDR memory is not allocated until user enables
* tmc at least once. If user specifies different ETR
@ -1473,13 +1463,38 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
}
coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
} else if (drvdata->byte_cntr->sw_usb) {
if (!drvdata->etr_buf) {
free_buf = new_buf =
tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf)) {
return -ENOMEM;
}
}
coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
drvdata->usbch = usb_qdss_open("qdss_mdm",
drvdata->byte_cntr,
usb_bypass_notifier);
if (IS_ERR_OR_NULL(drvdata->usbch)) {
dev_err(drvdata->dev, "usb_qdss_open failed\n");
return -ENODEV;
}
} else {
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
if (IS_ERR_OR_NULL(drvdata->usbch)) {
dev_err(drvdata->dev, "usb_qdss_open failed\n");
return -ENODEV;
}
}
spin_lock_irqsave(&drvdata->spinlock, flags);
}
if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto unlock_out;
goto out;
}
/*
@ -1489,7 +1504,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
*/
if (drvdata->mode == CS_MODE_SYSFS) {
atomic_inc(csdev->refcnt);
goto unlock_out;
goto out;
}
/*
@ -1507,40 +1522,16 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
&& drvdata->byte_cntr->sw_usb)) {
ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
if (ret)
goto unlock_out;
goto out;
}
drvdata->mode = CS_MODE_SYSFS;
drvdata->enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
if (drvdata->byte_cntr->sw_usb)
drvdata->usbch = usb_qdss_open("qdss_mdm",
drvdata->byte_cntr,
usb_bypass_notifier);
else
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
if (IS_ERR_OR_NULL(drvdata->usbch)) {
dev_err(&csdev->dev, "usb_qdss_open failed\n");
drvdata->enable = false;
drvdata->mode = CS_MODE_DISABLED;
if (drvdata->byte_cntr->sw_usb)
tmc_etr_disable_hw(drvdata);
ret = -ENODEV;
goto out;
}
}
atomic_inc(csdev->refcnt);
goto out;
unlock_out:
drvdata->enable = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
out:
/* Free memory outside the spinlock if need be */
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
@ -2045,18 +2036,27 @@ static int _tmc_disable_etr_sink(struct coresight_device *csdev,
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if ((drvdata->out_mode == TMC_ETR_OUT_MODE_USB
&& drvdata->byte_cntr->sw_usb)
|| drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
else {
usb_bypass_stop(drvdata->byte_cntr);
flush_workqueue(drvdata->byte_cntr->usb_wq);
drvdata->usbch = NULL;
}
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
&& drvdata->byte_cntr->sw_usb) {
usb_bypass_stop(drvdata->byte_cntr);
flush_workqueue(drvdata->byte_cntr->usb_wq);
drvdata->usbch = NULL;
coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
if (drvdata->etr_buf) {
tmc_etr_free_sysfs_buf(drvdata->etr_buf);
drvdata->etr_buf = NULL;
}
}
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
if (drvdata->etr_buf) {
tmc_etr_free_sysfs_buf(drvdata->etr_buf);
drvdata->etr_buf = NULL;
}
}
out:
dev_info(drvdata->dev, "TMC-ETR disabled\n");


@ -491,7 +491,7 @@ static int msc_configure(struct msc *msc)
lockdep_assert_held(&msc->buf_mutex);
if (msc->mode > MSC_MODE_MULTI)
return -EINVAL;
return -ENOTSUPP;
if (msc->mode == MSC_MODE_MULTI)
msc_buffer_clear_hw_header(msc);
@ -942,7 +942,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
} else if (msc->mode == MSC_MODE_MULTI) {
ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
} else {
ret = -EINVAL;
ret = -ENOTSUPP;
}
if (!ret) {
@ -1165,7 +1165,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
if (ret >= 0)
*ppos = iter->offset;
} else {
ret = -EINVAL;
ret = -ENOTSUPP;
}
put_count:


@ -210,11 +210,6 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Elkhart Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Elkhart Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),


@ -248,7 +248,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
if (ret == -ENOENT)
retdesc = ERR_PTR(-EPROBE_DEFER);
if (PTR_ERR(retdesc) != -EPROBE_DEFER)
if (ret != -EPROBE_DEFER)
dev_err(dev, "error trying to get descriptor: %d\n", ret);
return retdesc;


@ -352,18 +352,10 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
struct i2c_client *client;
dev = bus_find_device(&i2c_bus_type, NULL, adev,
i2c_acpi_find_match_device);
if (!dev)
return NULL;
client = i2c_verify_client(dev);
if (!client)
put_device(dev);
return client;
return dev ? i2c_verify_client(dev) : NULL;
}
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,


@ -806,10 +806,10 @@ static int _i3c_geni_execute_command
if (gi3c->err) {
if (rnw == READ_TRANSACTION)
writel_relaxed(1, gi3c->se.base +
SE_DMA_RX_FSM_RST);
SE_DMA_TX_FSM_RST);
else
writel_relaxed(1, gi3c->se.base +
SE_DMA_TX_FSM_RST);
SE_DMA_RX_FSM_RST);
wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT);
}
geni_se_rx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
@ -868,8 +868,8 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
while (1) {
u8 rx_buf[8], tx_buf[8];
struct i3c_xfer_params xfer = { FIFO_MODE };
struct i3c_dev_boardinfo *i3cboardinfo = NULL;
struct i3c_dev_desc *i3cdev = NULL;
struct i3c_dev_boardinfo *i3cboardinfo;
struct i3c_dev_desc *i3cdev;
u64 pid;
u8 bcr, dcr, init_dyn_addr = 0, addr = 0;
bool enum_slv = false;
@ -895,9 +895,6 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
((u64)rx_buf[5]);
list_for_each_entry(i3cboardinfo, &m->boardinfo.i3c, node) {
if (i3cboardinfo == NULL)
break;
if (pid == i3cboardinfo->pid) {
GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
"PID 0x:%x matched with boardinfo\n", pid);
@ -905,14 +902,11 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
}
}
if (i3cboardinfo == NULL) {
GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
"Invalid i3cboardinfo\n");
goto daa_err;
}
if (i3cboardinfo)
addr = init_dyn_addr = i3cboardinfo->init_dyn_addr;
addr = init_dyn_addr = i3cboardinfo->init_dyn_addr;
addr = ret = i3c_master_get_free_addr(m, addr);
if (ret < 0) {
GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
"error during get_free_addr ret:%d for pid:0x:%x\n"


@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
{"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};


@ -731,7 +731,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
u32 cor;
if (!chan)
continue;
@ -740,20 +739,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
chan->type == IIO_PRESSURE)
continue;
if (state) {
cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
if (chan->differential)
cor |= (BIT(chan->channel) |
BIT(chan->channel2)) <<
AT91_SAMA5D2_COR_DIFF_OFFSET;
else
cor &= ~(BIT(chan->channel) <<
AT91_SAMA5D2_COR_DIFF_OFFSET);
at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
}
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));


@ -1208,35 +1208,6 @@ static int qcom_vadc_scale_hw_smb1398_temp(
return 0;
}
static int qcom_vadc_scale_hw_pm2250_s3_die_temp(
const struct vadc_prescale_ratio *prescale,
const struct adc_data *data,
u16 adc_code, int *result_mdec)
{
s64 voltage = 0, adc_vdd_ref_mv = 1875;
if (adc_code > VADC5_MAX_CODE)
adc_code = 0;
/* (ADC code * vref_vadc (1.875V)) / full_scale_code */
voltage = (s64) adc_code * adc_vdd_ref_mv * 1000;
voltage = div64_s64(voltage, data->full_scale_code_volt);
if (voltage > 0) {
voltage *= prescale->den;
voltage = div64_s64(voltage, prescale->num);
} else {
voltage = 0;
}
voltage = PMIC5_PM2250_S3_DIE_TEMP_CONSTANT - voltage;
voltage *= 100000;
voltage = div64_s64(voltage, PMIC5_PM2250_S3_DIE_TEMP_SCALE_FACTOR);
*result_mdec = voltage;
return 0;
}
static int qcom_vadc_scale_hw_chg5_temp(
const struct vadc_prescale_ratio *prescale,
const struct adc_data *data,
@ -1358,9 +1329,6 @@ int qcom_vadc_hw_scale(enum vadc_scale_fn_type scaletype,
case SCALE_HW_CALIB_PM5_SMB1398_TEMP:
return qcom_vadc_scale_hw_smb1398_temp(prescale, data,
adc_code, result);
case SCALE_HW_CALIB_PM2250_S3_DIE_TEMP:
return qcom_vadc_scale_hw_pm2250_s3_die_temp(prescale, data,
adc_code, result);
case SCALE_HW_CALIB_THERM_100K_PU_PM7:
return qcom_vadc7_scale_hw_calib_therm(prescale, data,
adc_code, result);


@ -47,9 +47,6 @@
#define PMIC5_SMB1398_TEMP_SCALE_FACTOR 340
#define PMIC5_SMB1398_TEMP_CONSTANT 268235
#define PMIC5_PM2250_S3_DIE_TEMP_SCALE_FACTOR 187263
#define PMIC5_PM2250_S3_DIE_TEMP_CONSTANT 720100
#define PMI_CHG_SCALE_1 -138890
#define PMI_CHG_SCALE_2 391750000000LL
@ -175,8 +172,6 @@ struct vadc_prescale_ratio {
* SCALE_HW_CALIB_BATT_THERM_400K: Returns battery thermistor voltage in
* decidegC using 400k pullup. The hardware applies offset/slope to adc
* code.
* SCALE_HW_CALIB_PM2250_S3_DIE_TEMP: Returns result in millidegrees for
* S3 die temperature channel on PM2250.
*/
enum vadc_scale_fn_type {
SCALE_DEFAULT = 0,
@ -195,7 +190,6 @@ enum vadc_scale_fn_type {
SCALE_HW_CALIB_BATT_THERM_30K,
SCALE_HW_CALIB_BATT_THERM_400K,
SCALE_HW_CALIB_PM5_SMB1398_TEMP,
SCALE_HW_CALIB_PM2250_S3_DIE_TEMP,
SCALE_HW_CALIB_THERM_100K_PU_PM7,
SCALE_HW_CALIB_PMIC_THERM_PM7,
SCALE_HW_CALIB_MAX,


@ -150,10 +150,8 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
/* Default wait time is 50ms, add 20% tolerance. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
/* Default wait time is 4.8ms, add 20% tolerance. */
data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
mutex_init(&data->vcnl4200_al.lock);


@ -563,7 +563,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
* We read all axes and discard all but one, for optimized
* reading, use the triggered buffer.
*/
*val = (s16)le16_to_cpu(hw_values[chan->address]);
*val = le16_to_cpu(hw_values[chan->address]);
ret = IIO_VAL_INT;
}


@ -161,8 +161,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
return 0;
}
static void stm32_timer_stop(struct stm32_timer_trigger *priv,
struct iio_trigger *trig)
static void stm32_timer_stop(struct stm32_timer_trigger *priv)
{
u32 ccer, cr1;
@ -180,12 +179,6 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv,
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
/* Force disable master mode */
if (stm32_timer_is_trgo2_name(trig->name))
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
else
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
}
@ -204,7 +197,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
return ret;
if (freq == 0) {
stm32_timer_stop(priv, trig);
stm32_timer_stop(priv);
} else {
ret = stm32_timer_start(priv, trig, freq);
if (ret)


@ -321,6 +321,7 @@ static const char * const qpnp_poff_reason[] = {
[39] = "Triggered by (OTST3 Over-temperature Stage 3)",
};
static bool is_black_screen;
static int
qpnp_pon_masked_write(struct qpnp_pon *pon, u16 addr, u8 mask, u8 val)
{
@ -971,6 +972,8 @@ static int qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
#endif
} else {
pr_info("Power-Key DOWN\n");
is_black_screen = dsi_panel_backlight_get() != 0 ?
false : true;
schedule_delayed_work(&pon->press_work, msecs_to_jiffies(4000));
schedule_delayed_work(&pon->press_pwr, msecs_to_jiffies(6000));
#ifdef CONFIG_KEY_FLUSH
@ -1216,7 +1219,9 @@ static void up_work_func(struct work_struct *work)
static void press_work_func(struct work_struct *work)
{
int display_bl, boot_mode;
int boot_mode;
bool is_black_screen_now;
bool black_screen_detected = false;
int rc;
uint pon_rt_sts = 0;
struct qpnp_pon_config *cfg;
@ -1237,12 +1242,19 @@ static void press_work_func(struct work_struct *work)
if ((pon_rt_sts & QPNP_PON_KPDPWR_N_SET) == 1) {
qpnp_powerkey_state_check(pon, 1);
dev_err(pon->dev, "after 4s Power-Key is still DOWN\n");
display_bl = dsi_panel_backlight_get();
is_black_screen_now = dsi_panel_backlight_get() != 0 ? false : true;
if (is_black_screen == true && is_black_screen_now == true)
black_screen_detected = true;
pr_info("bl_screen=%d bl_screen_now=%d, bl_screen_det=%d\n",
is_black_screen, is_black_screen_now, black_screen_detected);
boot_mode = get_boot_mode();
if (display_bl == 0 && boot_mode == MSM_BOOT_MODE_NORMAL) {
if (black_screen_detected == true && boot_mode == MSM_BOOT_MODE_NORMAL) {
pr_info(" ============== BLACK SCREEN DETECTED ==========");
oem_force_minidump_mode();
get_init_sched_info();
show_state_filter(TASK_UNINTERRUPTIBLE);
dump_runqueue();
dump_workqueue();
send_sig_to_get_trace("system_server");
send_sig_to_get_tombstone("surfaceflinger");
ksys_sync();
@ -2828,6 +2840,8 @@ static int qpnp_pon_probe(struct platform_device *pdev)
}
if (to_spmi_device(dev->parent)->usid == 10)
op_pm8998_regmap_register(pon->regmap);
if (to_spmi_device(dev->parent)->usid == 0)
op_pm8150_regmap_register(pon->regmap);
/* Get the total number of pon configurations and regulators */
for_each_available_child_of_node(dev->of_node, node) {
if (of_find_property(node, "regulator-name", NULL)) {


@ -190,15 +190,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
start -= iova_offset(iovad, start);
num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
for (i = 0; i < num_pages; i++) {
msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return -ENOMEM;
msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return -ENOMEM;
msi_page->phys = start;
msi_page->iova = start;
INIT_LIST_HEAD(&msi_page->list);
list_add(&msi_page->list, &cookie->msi_page_list);
for (i = 0; i < num_pages; i++) {
msi_page[i].phys = start;
msi_page[i].iova = start;
INIT_LIST_HEAD(&msi_page[i].list);
list_add(&msi_page[i].list, &cookie->msi_page_list);
start += iovad->granule;
}


@ -39,7 +39,6 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
@ -140,13 +139,6 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
BUG_ON(dev->is_virtfn);
/*
* Ignore devices that have a domain number higher than what can
* be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
*/
if (pci_domain_nr(dev->bus) > U16_MAX)
return NULL;
/* Only generate path[] for device addition event */
if (event == BUS_NOTIFY_ADD_DEVICE)
for (tmp = dev; tmp; tmp = tmp->bus->self)
@ -459,13 +451,12 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
/* Check for NUL termination within the designated length */
if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
pr_warn(FW_BUG
WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return -EINVAL;
}
pr_info("ANDD device: %x name: %s\n", andd->device_number,
@ -491,14 +482,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
return 0;
}
}
pr_warn(FW_BUG
WARN_TAINT(
1, TAINT_FIRMWARE_WORKAROUND,
"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
rhsa->base_address,
drhd->reg_base_addr,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return 0;
}
@ -847,14 +838,14 @@ int __init dmar_table_init(void)
static void warn_invalid_dmar(u64 addr, const char *message)
{
pr_warn_once(FW_BUG
WARN_TAINT_ONCE(
1, TAINT_FIRMWARE_WORKAROUND,
"Your BIOS is broken; DMAR reported at address %llx%s!\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
addr, message,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
static int __ref


@ -3998,11 +3998,10 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
/* we know that the this iommu should be at offset 0xa000 from vtbar */
drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
TAINT_FIRMWARE_WORKAROUND,
"BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@ -5144,10 +5143,8 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
u64 phys = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte && dma_pte_present(pte))
phys = dma_pte_addr(pte) +
(iova & (BIT_MASK(level_to_offset_bits(level) +
VTD_PAGE_SHIFT) - 1));
if (pte)
phys = dma_pte_addr(pte);
return phys;
}


@ -45,6 +45,8 @@
#include <linux/wakeup_reason.h>
#include "irq-gic-common.h"
unsigned int qrtr_first_msg = 1;
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@ -377,6 +379,7 @@ static void gic_show_resume_irq(struct gic_chip_data *gic)
log_wakeup_reason(irq);
pr_warn("%s: %d triggered %s\n", __func__, irq, name);
qrtr_first_msg = 0;
}
}


@ -6,7 +6,7 @@
#include <soc/qcom/mpm.h>
const struct mpm_pin mpm_scuba_gic_chip_data[] = {
{2, 307}, /*tsens0_tsens_upper_lower_int */
{2, 222},
{5, 328}, /* lpass_irq_out_sdc */
{12, 454}, /* b3_lfps_rxterm_irq */
{24, 111}, /* bi_px_lpi_1_aoss_mx */


@ -183,7 +183,6 @@ static int led_pwm_set(struct led_classdev *led_cdev,
duty = led_data->period - duty;
led_data->duty = duty;
led_data->blinking = false;
__led_pwm_set(led_data);


@ -313,16 +313,9 @@ static const struct i2c_device_id wf_ad7417_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
static const struct of_device_id wf_ad7417_of_id[] = {
{ .compatible = "ad7417", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
static struct i2c_driver wf_ad7417_driver = {
.driver = {
.name = "wf_ad7417",
.of_match_table = wf_ad7417_of_id,
},
.probe = wf_ad7417_probe,
.remove = wf_ad7417_remove,


@ -583,16 +583,9 @@ static const struct i2c_device_id wf_fcu_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
static const struct of_device_id wf_fcu_of_id[] = {
{ .compatible = "fcu", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
static struct i2c_driver wf_fcu_driver = {
.driver = {
.name = "wf_fcu",
.of_match_table = wf_fcu_of_id,
},
.probe = wf_fcu_probe,
.remove = wf_fcu_remove,


@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@ -93,14 +92,9 @@ static int wf_lm75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wf_lm75_sensor *lm;
int rc, ds1775;
int rc, ds1775 = id->driver_data;
const char *name, *loc;
if (id)
ds1775 = id->driver_data;
else
ds1775 = !!of_device_get_match_data(&client->dev);
DBG("wf_lm75: creating %s device at address 0x%02x\n",
ds1775 ? "ds1775" : "lm75", client->addr);
@ -171,17 +165,9 @@ static const struct i2c_device_id wf_lm75_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
static const struct of_device_id wf_lm75_of_id[] = {
{ .compatible = "lm75", .data = (void *)0},
{ .compatible = "ds1775", .data = (void *)1 },
{ }
};
MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
static struct i2c_driver wf_lm75_driver = {
.driver = {
.name = "wf_lm75",
.of_match_table = wf_lm75_of_id,
},
.probe = wf_lm75_probe,
.remove = wf_lm75_remove,


@ -168,16 +168,9 @@ static const struct i2c_device_id wf_lm87_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
static const struct of_device_id wf_lm87_of_id[] = {
{ .compatible = "lm87cimt", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
static struct i2c_driver wf_lm87_driver = {
.driver = {
.name = "wf_lm87",
.of_match_table = wf_lm87_of_id,
},
.probe = wf_lm87_probe,
.remove = wf_lm87_remove,


@ -121,16 +121,9 @@ static const struct i2c_device_id wf_max6690_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
static const struct of_device_id wf_max6690_of_id[] = {
{ .compatible = "max6690", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
static struct i2c_driver wf_max6690_driver = {
.driver = {
.name = "wf_max6690",
.of_match_table = wf_max6690_of_id,
},
.probe = wf_max6690_probe,
.remove = wf_max6690_remove,


@ -343,16 +343,9 @@ static const struct i2c_device_id wf_sat_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_sat_id);
static const struct of_device_id wf_sat_of_id[] = {
{ .compatible = "smu-sat", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_sat_of_id);
static struct i2c_driver wf_sat_driver = {
.driver = {
.name = "wf_smu_sat",
.of_match_table = wf_sat_of_id,
},
.probe = wf_sat_probe,
.remove = wf_sat_remove,


@ -20,13 +20,8 @@
struct dm_bio_details {
struct gendisk *bi_disk;
u8 bi_partno;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
bio_end_io_t *bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity;
#endif
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
@ -35,11 +30,6 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
bd->bi_end_io = bio->bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
bd->bi_integrity = bio_integrity(bio);
#endif
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
@ -48,11 +38,6 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
bio->bi_end_io = bd->bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
bio->bi_integrity = bd->bi_integrity;
#endif
}
#endif


@ -792,7 +792,6 @@ static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
*/
original_type = br->type;
sector0 = backup_br->sector;
bc->trims_total -= range_size(backup_br);
if (backup_br->type == TRIMMED)
list_del(&backup_br->trimmed_list);
backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT


@ -6,8 +6,6 @@
* This file is released under the GPL.
*/
#include "dm-bio-record.h"
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
@ -278,7 +276,11 @@ struct dm_integrity_io {
struct completion *completion;
struct dm_bio_details bio_details;
struct gendisk *orig_bi_disk;
u8 orig_bi_partno;
bio_end_io_t *orig_bi_end_io;
struct bio_integrity_payload *orig_bi_integrity;
struct bvec_iter orig_bi_iter;
};
struct journal_completion {
@ -1252,9 +1254,14 @@ static void integrity_end_io(struct bio *bio)
{
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
dm_bio_restore(&dio->bio_details, bio);
if (bio->bi_integrity)
bio->bi_iter = dio->orig_bi_iter;
bio->bi_disk = dio->orig_bi_disk;
bio->bi_partno = dio->orig_bi_partno;
if (dio->orig_bi_integrity) {
bio->bi_integrity = dio->orig_bi_integrity;
bio->bi_opf |= REQ_INTEGRITY;
}
bio->bi_end_io = dio->orig_bi_end_io;
if (dio->completion)
complete(dio->completion);
@ -1340,7 +1347,7 @@ static void integrity_metadata(struct work_struct *w)
}
}
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@ -1384,7 +1391,7 @@ again:
if (likely(checksums != checksums_onstack))
kfree(checksums);
} else {
struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
struct bio_integrity_payload *bip = dio->orig_bi_integrity;
if (bip) {
struct bio_vec biv;
@ -1788,13 +1795,20 @@ offload_to_thread:
} else
dio->completion = NULL;
dm_bio_record(&dio->bio_details, bio);
dio->orig_bi_iter = bio->bi_iter;
dio->orig_bi_disk = bio->bi_disk;
dio->orig_bi_partno = bio->bi_partno;
bio_set_dev(bio, ic->dev->bdev);
dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
dio->orig_bi_end_io = bio->bi_end_io;
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
generic_make_request(bio);
if (need_sync_io) {


@ -10,20 +10,6 @@ menuconfig SPECTRA_CAMERA
IFE and postprocessing drivers.
source "drivers/media/platform/msm/vidc_3x/Kconfig"
menuconfig MSMB_CAMERA
bool "QTI MSM camera and video capture 2.0 support"
depends on ARCH_QCOM && VIDEO_V4L2 && I2C
help
Say Y here to enable selecting the video adapters for
QTI msm camera and video capture 2.0, enabling this
adds support for the camera driver stack including sensor, isp
and postprocessing drivers.
if MSMB_CAMERA
source "drivers/media/platform/msm/camera_v2/Kconfig"
endif # MSMB_CAMERA
source "drivers/media/platform/msm/cvp/Kconfig"
source "drivers/media/platform/msm/npu/Kconfig"
source "drivers/media/platform/msm/synx/Kconfig"


@ -10,4 +10,3 @@ obj-$(CONFIG_MSM_NPU) += npu/
obj-$(CONFIG_MSM_GLOBAL_SYNX) += synx/
obj-$(CONFIG_TSPP) += broadcast/
obj-$(CONFIG_DVB_MPQ) += dvb/
obj-$(CONFIG_MSMB_CAMERA) += camera_v2/

Some files were not shown because too many files have changed in this diff.