From ed1f2e85da79274f3dc4092953f1359eb732f0c6 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Thu, 18 Jul 2019 16:28:24 -0700 Subject: iio: cros_ec: Add calibscale for 3d MEMS Add calibration scale support to accel, gyro and magnetometer. Checked on eve with current firmware: reading calibscale returns 1.0. Checked with newer firmware: the values are applied. Signed-off-by: Gwendal Grignou Signed-off-by: Jonathan Cameron --- include/linux/iio/common/cros_ec_sensors_core.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 0c636b9fe8d7..bb03a252bd04 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -62,7 +62,10 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; - s16 calib[CROS_EC_SENSOR_MAX_AXIS]; + struct calib_data { + s16 offset; + u16 scale; + } calib[CROS_EC_SENSOR_MAX_AXIS]; u8 samples[CROS_EC_SAMPLE_SIZE]; -- cgit v1.2.3-71-gd317 From a090965b882333500d8780e2c1762e17782f413f Mon Sep 17 00:00:00 2001 From: Denis Ciocca Date: Thu, 18 Jul 2019 15:53:43 -0700 Subject: iio:common:st_sensors: add st_sensors_get_settings_index() helper function Extract from the st_sensors_check_device_support() function the code that is used to get the specific settings for a device. This will be used as a generic extractor by each ST driver. Signed-off-by: Denis Ciocca Signed-off-by: Jonathan Cameron --- drivers/iio/common/st_sensors/st_sensors_core.c | 49 +++++++++++++++++-------- include/linux/iio/common/st_sensors.h | 4 ++ 2 files changed, 38 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 8b22dc241482..3610ca9eaa87 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -633,28 +633,47 @@ static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, return 0; } +/* + * st_sensors_get_settings_index() - get index of the sensor settings for a + * specific device from list of settings + * @name: device name buffer reference. + * @list: sensor settings list. + * @list_length: length of sensor settings list. + * + * Return: non-negative number on success (valid index), + * negative error code otherwise.
+ */ +int st_sensors_get_settings_index(const char *name, + const struct st_sensor_settings *list, + const int list_length) +{ + int i, n; + + for (i = 0; i < list_length; i++) { + for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) { + if (strcmp(name, list[i].sensors_supported[n]) == 0) + return i; + } + } + + return -ENODEV; +} +EXPORT_SYMBOL(st_sensors_get_settings_index); + int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensor_settings *sensor_settings) { - int i, n, err = 0; - u8 wai; struct st_sensor_data *sdata = iio_priv(indio_dev); + int i, err; + u8 wai; - for (i = 0; i < num_sensors_list; i++) { - for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) { - if (strcmp(indio_dev->name, - sensor_settings[i].sensors_supported[n]) == 0) { - break; - } - } - if (n < ST_SENSORS_MAX_4WAI) - break; - } - if (i == num_sensors_list) { + i = st_sensors_get_settings_index(indio_dev->name, + sensor_settings, num_sensors_list); + if (i < 0) { dev_err(&indio_dev->dev, "device name %s not recognized.\n", - indio_dev->name); - return -ENODEV; + indio_dev->name); + return i; } err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 2948ac99e2da..17fbf3e9b013 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -334,6 +334,10 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); +int st_sensors_get_settings_index(const char *name, + const struct st_sensor_settings *list, + const int list_length); + int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensor_settings *sensor_settings); -- cgit v1.2.3-71-gd317 From 1ecd245e0eb23d1c3803474eba75589743d0d1fe Mon Sep 17 00:00:00 2001 From: Denis Ciocca Date: Thu, 18 Jul 2019 15:53:52 -0700 Subject: iio: move 3-wire spi initialization to st_sensors_spi Some devices need to be configured with a special bit in order to use SPI 3-wire. This was done during the device identification phase. Instead, let's move this part into the SPI-specific code. With this change, the check_device_support function becomes a simple device ID check, so let's rename it.
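For illustration, the probe flow after this change is: the SPI glue writes the SIM (serial interface mode) register before any other register access, and the core only checks the WhoAmI value. A board selects 3-wire mode either with the "spi-3wire" device-tree property or through platform data; a minimal sketch of the platform-data path, assuming the st_sensors_platform_data fields referenced in the diff below:

	/* Hypothetical board-file fragment: request SPI 3-wire (SIM) mode so
	 * that st_sensors_spi_configure() writes the sim register before the
	 * first register read. A "spi-3wire" DT property has the same effect,
	 * as checked by st_sensors_is_spi_3_wire() below.
	 */
	#include <linux/iio/common/st_sensors.h>

	static struct st_sensors_platform_data board_accel_pdata = {
		.drdy_int_pin = 1,	/* route data-ready to INT1 */
		.spi_3wire = true,	/* consumed by st_sensors_is_spi_3_wire() */
	};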
Signed-off-by: Denis Ciocca Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_core.c | 4 +- drivers/iio/accel/st_accel_spi.c | 4 +- drivers/iio/common/st_sensors/st_sensors_core.c | 64 ++++++------------------ drivers/iio/common/st_sensors/st_sensors_spi.c | 65 ++++++++++++++++++++++++- drivers/iio/gyro/st_gyro_core.c | 4 +- drivers/iio/gyro/st_gyro_spi.c | 4 +- drivers/iio/magnetometer/st_magn_core.c | 4 +- drivers/iio/magnetometer/st_magn_spi.c | 4 +- drivers/iio/pressure/st_pressure_core.c | 4 +- drivers/iio/pressure/st_pressure_spi.c | 4 +- include/linux/iio/common/st_sensors.h | 3 +- include/linux/iio/common/st_sensors_spi.h | 4 +- 12 files changed, 97 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 6fc490ffef7e..630909702a19 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -1183,9 +1183,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) if (err) return err; - err = st_sensors_check_device_support(indio_dev, - ARRAY_SIZE(st_accel_sensors_settings), - st_accel_sensors_settings); + err = st_sensors_verify_id(indio_dev); if (err < 0) goto st_accel_power_off; diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c index c0556db9d60a..8af7027d5598 100644 --- a/drivers/iio/accel/st_accel_spi.c +++ b/drivers/iio/accel/st_accel_spi.c @@ -124,7 +124,9 @@ static int st_accel_spi_probe(struct spi_device *spi) adata = iio_priv(indio_dev); adata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_spi_configure(indio_dev, spi, adata); + err = st_sensors_spi_configure(indio_dev, spi); + if (err < 0) + return err; err = st_accel_common_probe(indio_dev); if (err < 0) diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 3610ca9eaa87..f05bf7cf0594 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -608,31 +608,6 @@ out: } EXPORT_SYMBOL(st_sensors_read_info_raw); -static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, - const struct st_sensor_settings *sensor_settings) -{ - struct st_sensor_data *sdata = iio_priv(indio_dev); - struct device_node *np = sdata->dev->of_node; - struct st_sensors_platform_data *pdata; - - pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; - if (((np && of_property_read_bool(np, "spi-3wire")) || - (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { - int err; - - err = sdata->tf->write_byte(&sdata->tb, sdata->dev, - sensor_settings->sim.addr, - sensor_settings->sim.value); - if (err < 0) { - dev_err(&indio_dev->dev, - "failed to init interface mode\n"); - return err; - } - } - - return 0; -} - /* * st_sensors_get_settings_index() - get index of the sensor settings for a * specific device from list of settings @@ -660,36 +635,30 @@ int st_sensors_get_settings_index(const char *name, } EXPORT_SYMBOL(st_sensors_get_settings_index); -int st_sensors_check_device_support(struct iio_dev *indio_dev, - int num_sensors_list, - const struct st_sensor_settings *sensor_settings) +/* + * st_sensors_verify_id() - verify sensor ID (WhoAmI) is matching with the + * expected value + * @indio_dev: IIO device reference. + * + * Return: 0 on success (valid sensor ID), else a negative error code. 
+ */ +int st_sensors_verify_id(struct iio_dev *indio_dev) { struct st_sensor_data *sdata = iio_priv(indio_dev); - int i, err; + int err; u8 wai; - i = st_sensors_get_settings_index(indio_dev->name, - sensor_settings, num_sensors_list); - if (i < 0) { - dev_err(&indio_dev->dev, "device name %s not recognized.\n", - indio_dev->name); - return i; - } - - err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); - if (err < 0) - return err; - - if (sensor_settings[i].wai_addr) { + if (sdata->sensor_settings->wai_addr) { err = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sensor_settings[i].wai_addr, &wai); + sdata->sensor_settings->wai_addr, + &wai); if (err < 0) { dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n"); return err; } - if (sensor_settings[i].wai != wai) { + if (sdata->sensor_settings->wai != wai) { dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n", indio_dev->name, wai); @@ -697,12 +666,9 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, } } - sdata->sensor_settings = - (struct st_sensor_settings *)&sensor_settings[i]; - - return i; + return 0; } -EXPORT_SYMBOL(st_sensors_check_device_support); +EXPORT_SYMBOL(st_sensors_verify_id); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c index 2213843f02cb..a57cd648975c 100644 --- a/drivers/iio/common/st_sensors/st_sensors_spi.c +++ b/drivers/iio/common/st_sensors/st_sensors_spi.c @@ -102,9 +102,68 @@ static const struct st_sensor_transfer_function st_sensors_tf_spi = { .read_multiple_byte = st_sensors_spi_read_multiple_byte, }; -void st_sensors_spi_configure(struct iio_dev *indio_dev, - struct spi_device *spi, struct st_sensor_data *sdata) +/* + * st_sensors_is_spi_3_wire() - check if SPI 3-wire mode has been selected + * @spi: spi device reference. + * + * Return: true if SPI 3-wire mode is selected, false otherwise. + */ +static bool st_sensors_is_spi_3_wire(struct spi_device *spi) +{ + struct device_node *np = spi->dev.of_node; + struct st_sensors_platform_data *pdata; + + pdata = (struct st_sensors_platform_data *)spi->dev.platform_data; + if ((np && of_property_read_bool(np, "spi-3wire")) || + (pdata && pdata->spi_3wire)) { + return true; + } + + return false; +} + +/* + * st_sensors_configure_spi_3_wire() - configure SPI 3-wire if needed + * @spi: spi device reference. + * @settings: sensor specific settings reference. + * + * Return: 0 on success, else a negative error code. + */ +static int st_sensors_configure_spi_3_wire(struct spi_device *spi, + struct st_sensor_settings *settings) +{ + if (settings->sim.addr) { + u8 buffer[] = { + settings->sim.addr, + settings->sim.value + }; + + return spi_write(spi, buffer, 2); + } + + return 0; +} + +/* + * st_sensors_spi_configure() - configure SPI interface + * @indio_dev: IIO device reference. + * @spi: spi device reference. + * + * Return: 0 on success, else a negative error code. 
+ */ +int st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi) { + struct st_sensor_data *sdata = iio_priv(indio_dev); + int err; + + if (st_sensors_is_spi_3_wire(spi)) { + err = st_sensors_configure_spi_3_wire(spi, + sdata->sensor_settings); + if (err < 0) + return err; + } + spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; @@ -113,6 +172,8 @@ void st_sensors_spi_configure(struct iio_dev *indio_dev, sdata->dev = &spi->dev; sdata->tf = &st_sensors_tf_spi; sdata->get_irq_data_ready = st_sensors_spi_get_irq; + + return 0; } EXPORT_SYMBOL(st_sensors_spi_configure); diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 5cc63d41d855..4b87e79aa744 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -400,9 +400,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) if (err) return err; - err = st_sensors_check_device_support(indio_dev, - ARRAY_SIZE(st_gyro_sensors_settings), - st_gyro_sensors_settings); + err = st_sensors_verify_id(indio_dev); if (err < 0) goto st_gyro_power_off; diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c index bb7082055f85..b5c624251231 100644 --- a/drivers/iio/gyro/st_gyro_spi.c +++ b/drivers/iio/gyro/st_gyro_spi.c @@ -91,7 +91,9 @@ static int st_gyro_spi_probe(struct spi_device *spi) gdata = iio_priv(indio_dev); gdata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_spi_configure(indio_dev, spi, gdata); + err = st_sensors_spi_configure(indio_dev, spi); + if (err < 0) + return err; err = st_gyro_common_probe(indio_dev); if (err < 0) diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 43a49a52c81a..3f313aefece6 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -502,9 +502,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev) if (err) return err; - err = st_sensors_check_device_support(indio_dev, - ARRAY_SIZE(st_magn_sensors_settings), - st_magn_sensors_settings); + err = st_sensors_verify_id(indio_dev); if (err < 0) goto st_magn_power_off; diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c index a3045afc6b53..fbf909bde841 100644 --- a/drivers/iio/magnetometer/st_magn_spi.c +++ b/drivers/iio/magnetometer/st_magn_spi.c @@ -73,7 +73,9 @@ static int st_magn_spi_probe(struct spi_device *spi) mdata = iio_priv(indio_dev); mdata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_spi_configure(indio_dev, spi, mdata); + err = st_sensors_spi_configure(indio_dev, spi); + if (err < 0) + return err; err = st_magn_common_probe(indio_dev); if (err < 0) diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 35d80ff27464..a783fc075c26 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -698,9 +698,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) if (err) return err; - err = st_sensors_check_device_support(indio_dev, - ARRAY_SIZE(st_press_sensors_settings), - st_press_sensors_settings); + err = st_sensors_verify_id(indio_dev); if (err < 0) goto st_press_power_off; diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c index 3e8c1ffe001e..7c8b70221e70 100644 --- a/drivers/iio/pressure/st_pressure_spi.c +++ b/drivers/iio/pressure/st_pressure_spi.c @@ -83,7 +83,9 @@ static int st_press_spi_probe(struct spi_device *spi) press_data = 
iio_priv(indio_dev); press_data->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_spi_configure(indio_dev, spi, press_data); + err = st_sensors_spi_configure(indio_dev, spi); + if (err < 0) + return err; err = st_press_common_probe(indio_dev); if (err < 0) diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 17fbf3e9b013..566b955e2980 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -338,8 +338,7 @@ int st_sensors_get_settings_index(const char *name, const struct st_sensor_settings *list, const int list_length); -int st_sensors_check_device_support(struct iio_dev *indio_dev, - int num_sensors_list, const struct st_sensor_settings *sensor_settings); +int st_sensors_verify_id(struct iio_dev *indio_dev); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h index 6020f7167859..90b25f087f06 100644 --- a/include/linux/iio/common/st_sensors_spi.h +++ b/include/linux/iio/common/st_sensors_spi.h @@ -13,7 +13,7 @@ #include #include -void st_sensors_spi_configure(struct iio_dev *indio_dev, - struct spi_device *spi, struct st_sensor_data *sdata); +int st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi); #endif /* ST_SENSORS_SPI_H */ -- cgit v1.2.3-71-gd317 From 062809ef7733209312562e87cefc84a470430929 Mon Sep 17 00:00:00 2001 From: Denis Ciocca Date: Thu, 18 Jul 2019 15:53:53 -0700 Subject: iio: make st_sensors drivers use regmap This patch replaces the i2c/spi transfer functions with regmap. The SPI framework requires DMA-safe buffers, so let's add the GFP_DMA flag to the memory allocations used by the bulk_read functions.
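Most of the mechanical churn follows from one idea: the per-bus read_byte/write_byte/read_multiple_byte callbacks collapse into a regmap, and the address auto-increment ("multiread") bit moves into the regmap's read_flag_mask, which the regmap core ORs into the register address on every read. A rough sketch of the pattern (0x80 matches ST_SENSORS_I2C_MULTIREAD from the diff; the rest is illustrative):

	#include <linux/regmap.h>

	/* 8-bit registers and values; reads transfer (reg | 0x80), which is
	 * how these parts request address auto-increment for burst reads.
	 */
	static const struct regmap_config example_multiread_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.read_flag_mask = 0x80,
	};

	/* A multi-byte axis read then becomes a single call: */
	err = regmap_bulk_read(sdata->regmap, ch->address, outdata, byte_for_channel);

The GFP_DMA | GFP_KERNEL changes below exist because these buffers are now handed to regmap, whose SPI backend may DMA directly from them.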
Signed-off-by: Denis Ciocca Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_buffer.c | 3 +- drivers/iio/accel/st_accel_core.c | 3 - drivers/iio/accel/st_accel_i2c.c | 4 +- drivers/iio/common/st_sensors/st_sensors_buffer.c | 10 +-- drivers/iio/common/st_sensors/st_sensors_core.c | 39 +++----- drivers/iio/common/st_sensors/st_sensors_i2c.c | 73 ++++++++------- drivers/iio/common/st_sensors/st_sensors_spi.c | 100 +++++---------------- drivers/iio/common/st_sensors/st_sensors_trigger.c | 10 +-- drivers/iio/gyro/st_gyro_buffer.c | 3 +- drivers/iio/gyro/st_gyro_core.c | 3 - drivers/iio/gyro/st_gyro_i2c.c | 4 +- drivers/iio/magnetometer/st_magn_buffer.c | 3 +- drivers/iio/magnetometer/st_magn_core.c | 3 - drivers/iio/magnetometer/st_magn_i2c.c | 4 +- drivers/iio/pressure/st_pressure_buffer.c | 3 +- drivers/iio/pressure/st_pressure_core.c | 3 - drivers/iio/pressure/st_pressure_i2c.c | 4 +- include/linux/iio/common/st_sensors.h | 40 +-------- include/linux/iio/common/st_sensors_i2c.h | 4 +- 19 files changed, 104 insertions(+), 212 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index 0205c0167cdd..05f9aea431e2 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c @@ -39,7 +39,8 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev) int err; struct st_sensor_data *adata = iio_priv(indio_dev); - adata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + adata->buffer_data = kmalloc(indio_dev->scan_bytes, + GFP_DMA | GFP_KERNEL); if (adata->buffer_data == NULL) { err = -ENOMEM; goto allocate_memory_error; diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 630909702a19..0b17004cb12e 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -1177,7 +1176,6 @@ int st_accel_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &accel_info; - mutex_init(&adata->tb.buf_lock); err = st_sensors_power_enable(indio_dev); if (err) @@ -1188,7 +1186,6 @@ int st_accel_common_probe(struct iio_dev *indio_dev) goto st_accel_power_off; adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS; - adata->multiread_bit = adata->sensor_settings->multi_read_bit; indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS; channels_size = indio_dev->num_channels * sizeof(struct iio_chan_spec); diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c index a92cf776031e..50fa0fc32baa 100644 --- a/drivers/iio/accel/st_accel_i2c.c +++ b/drivers/iio/accel/st_accel_i2c.c @@ -174,7 +174,9 @@ static int st_accel_i2c_probe(struct i2c_client *client) adata = iio_priv(indio_dev); adata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_i2c_configure(indio_dev, client, adata); + ret = st_sensors_i2c_configure(indio_dev, client); + if (ret < 0) + return ret; ret = st_accel_common_probe(indio_dev); if (ret < 0) diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c index 4a68669dc555..eee30130ae23 100644 --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c @@ -17,15 +17,16 @@ #include #include #include +#include #include static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) { - int i; struct st_sensor_data *sdata = 
iio_priv(indio_dev); unsigned int num_data_channels = sdata->num_data_channels; + int i; for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) { const struct iio_chan_spec *channel = &indio_dev->channels[i]; @@ -36,11 +37,8 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) channel->scan_type.storagebits >> 3; buf = PTR_ALIGN(buf, storage_bytes); - if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev, - channel->address, - bytes_to_read, buf, - sdata->multiread_bit) < - bytes_to_read) + if (regmap_bulk_read(sdata->regmap, channel->address, + buf, bytes_to_read) < 0) return -EIO; /* Advance the buffer pointer */ diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index f05bf7cf0594..4a3064fb6cd9 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -28,19 +29,10 @@ static inline u32 st_sensors_get_unaligned_le24(const u8 *p) int st_sensors_write_data_with_mask(struct iio_dev *indio_dev, u8 reg_addr, u8 mask, u8 data) { - int err; - u8 new_data; struct st_sensor_data *sdata = iio_priv(indio_dev); - err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data); - if (err < 0) - goto st_sensors_write_data_with_mask_error; - - new_data = ((new_data & (~mask)) | ((data << __ffs(mask)) & mask)); - err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data); - -st_sensors_write_data_with_mask_error: - return err; + return regmap_update_bits(sdata->regmap, + reg_addr, mask, data << __ffs(mask)); } int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, @@ -48,19 +40,15 @@ int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, unsigned *readval) { struct st_sensor_data *sdata = iio_priv(indio_dev); - u8 readdata; int err; if (!readval) - return sdata->tf->write_byte(&sdata->tb, sdata->dev, - (u8)reg, (u8)writeval); + return regmap_write(sdata->regmap, reg, writeval); - err = sdata->tf->read_byte(&sdata->tb, sdata->dev, (u8)reg, &readdata); + err = regmap_read(sdata->regmap, reg, readval); if (err < 0) return err; - *readval = (unsigned)readdata; - return 0; } EXPORT_SYMBOL(st_sensors_debugfs_reg_access); @@ -545,7 +533,7 @@ st_sensors_match_scale_error: EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain); static int st_sensors_read_axis_data(struct iio_dev *indio_dev, - struct iio_chan_spec const *ch, int *data) + struct iio_chan_spec const *ch, int *data) { int err; u8 *outdata; @@ -554,13 +542,12 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev, byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits + ch->scan_type.shift, 8); - outdata = kmalloc(byte_for_channel, GFP_KERNEL); + outdata = kmalloc(byte_for_channel, GFP_DMA | GFP_KERNEL); if (!outdata) return -ENOMEM; - err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev, - ch->address, byte_for_channel, - outdata, sdata->multiread_bit); + err = regmap_bulk_read(sdata->regmap, ch->address, + outdata, byte_for_channel); if (err < 0) goto st_sensors_free_memory; @@ -645,13 +632,11 @@ EXPORT_SYMBOL(st_sensors_get_settings_index); int st_sensors_verify_id(struct iio_dev *indio_dev) { struct st_sensor_data *sdata = iio_priv(indio_dev); - int err; - u8 wai; + int wai, err; if (sdata->sensor_settings->wai_addr) { - err = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sdata->sensor_settings->wai_addr, - &wai); + err = regmap_read(sdata->regmap, + 
sdata->sensor_settings->wai_addr, &wai); if (err < 0) { dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n"); diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c index b1c9812407e7..9240625534df 100644 --- a/drivers/iio/common/st_sensors/st_sensors_i2c.c +++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -26,55 +27,51 @@ static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev) return to_i2c_client(sdata->dev)->irq; } -static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 *res_byte) -{ - int err; - - err = i2c_smbus_read_byte_data(to_i2c_client(dev), reg_addr); - if (err < 0) - goto st_accel_i2c_read_byte_error; - - *res_byte = err & 0xff; - -st_accel_i2c_read_byte_error: - return err < 0 ? err : 0; -} - -static int st_sensors_i2c_read_multiple_byte( - struct st_sensor_transfer_buffer *tb, struct device *dev, - u8 reg_addr, int len, u8 *data, bool multiread_bit) -{ - if (multiread_bit) - reg_addr |= ST_SENSORS_I2C_MULTIREAD; - - return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev), - reg_addr, len, data); -} - -static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 data) -{ - return i2c_smbus_write_byte_data(to_i2c_client(dev), reg_addr, data); -} +static const struct regmap_config st_sensors_i2c_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; -static const struct st_sensor_transfer_function st_sensors_tf_i2c = { - .read_byte = st_sensors_i2c_read_byte, - .write_byte = st_sensors_i2c_write_byte, - .read_multiple_byte = st_sensors_i2c_read_multiple_byte, +static const struct regmap_config st_sensors_i2c_regmap_multiread_bit_config = { + .reg_bits = 8, + .val_bits = 8, + .read_flag_mask = ST_SENSORS_I2C_MULTIREAD, }; -void st_sensors_i2c_configure(struct iio_dev *indio_dev, - struct i2c_client *client, struct st_sensor_data *sdata) +/* + * st_sensors_i2c_configure() - configure I2C interface + * @indio_dev: IIO device reference. + * @client: i2c client reference. + * + * Return: 0 on success, else a negative error code. 
+ */ +int st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client) { + struct st_sensor_data *sdata = iio_priv(indio_dev); + const struct regmap_config *config; + + if (sdata->sensor_settings->multi_read_bit) + config = &st_sensors_i2c_regmap_multiread_bit_config; + else + config = &st_sensors_i2c_regmap_config; + + sdata->regmap = devm_regmap_init_i2c(client, config); + if (IS_ERR(sdata->regmap)) { + dev_err(&client->dev, "Failed to register i2c regmap (%d)\n", + (int)PTR_ERR(sdata->regmap)); + return PTR_ERR(sdata->regmap); + } + i2c_set_clientdata(client, indio_dev); indio_dev->dev.parent = &client->dev; indio_dev->name = client->name; sdata->dev = &client->dev; - sdata->tf = &st_sensors_tf_i2c; sdata->get_irq_data_ready = st_sensors_i2c_get_irq; + + return 0; } EXPORT_SYMBOL(st_sensors_i2c_configure); diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c index a57cd648975c..9c0661a283d0 100644 --- a/drivers/iio/common/st_sensors/st_sensors_spi.c +++ b/drivers/iio/common/st_sensors/st_sensors_spi.c @@ -11,12 +11,12 @@ #include #include #include +#include #include - +#include "st_sensors_core.h" #define ST_SENSORS_SPI_MULTIREAD 0xc0 -#define ST_SENSORS_SPI_READ 0x80 static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev) { @@ -25,81 +25,15 @@ static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev) return to_spi_device(sdata->dev)->irq; } -static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit) -{ - int err; - - struct spi_transfer xfers[] = { - { - .tx_buf = tb->tx_buf, - .bits_per_word = 8, - .len = 1, - }, - { - .rx_buf = tb->rx_buf, - .bits_per_word = 8, - .len = len, - } - }; - - mutex_lock(&tb->buf_lock); - if ((multiread_bit) && (len > 1)) - tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD; - else - tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ; - - err = spi_sync_transfer(to_spi_device(dev), xfers, ARRAY_SIZE(xfers)); - if (err) - goto acc_spi_read_error; - - memcpy(data, tb->rx_buf, len); - mutex_unlock(&tb->buf_lock); - return len; - -acc_spi_read_error: - mutex_unlock(&tb->buf_lock); - return err; -} - -static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 *res_byte) -{ - return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false); -} - -static int st_sensors_spi_read_multiple_byte( - struct st_sensor_transfer_buffer *tb, struct device *dev, - u8 reg_addr, int len, u8 *data, bool multiread_bit) -{ - return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit); -} - -static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 data) -{ - int err; - - struct spi_transfer xfers = { - .tx_buf = tb->tx_buf, - .bits_per_word = 8, - .len = 2, - }; - - mutex_lock(&tb->buf_lock); - tb->tx_buf[0] = reg_addr; - tb->tx_buf[1] = data; - - err = spi_sync_transfer(to_spi_device(dev), &xfers, 1); - mutex_unlock(&tb->buf_lock); - - return err; -} +static const struct regmap_config st_sensors_spi_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; -static const struct st_sensor_transfer_function st_sensors_tf_spi = { - .read_byte = st_sensors_spi_read_byte, - .write_byte = st_sensors_spi_write_byte, - .read_multiple_byte = st_sensors_spi_read_multiple_byte, +static const struct regmap_config st_sensors_spi_regmap_multiread_bit_config = { + .reg_bits = 8, + .val_bits = 8, + 
.read_flag_mask = ST_SENSORS_SPI_MULTIREAD, }; /* @@ -155,6 +89,7 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev, struct spi_device *spi) { struct st_sensor_data *sdata = iio_priv(indio_dev); + const struct regmap_config *config; int err; if (st_sensors_is_spi_3_wire(spi)) { @@ -164,13 +99,24 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev, return err; } + if (sdata->sensor_settings->multi_read_bit) + config = &st_sensors_spi_regmap_multiread_bit_config; + else + config = &st_sensors_spi_regmap_config; + + sdata->regmap = devm_regmap_init_spi(spi, config); + if (IS_ERR(sdata->regmap)) { + dev_err(&spi->dev, "Failed to register spi regmap (%d)\n", + (int)PTR_ERR(sdata->regmap)); + return PTR_ERR(sdata->regmap); + } + spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; indio_dev->name = spi->modalias; sdata->dev = &spi->dev; - sdata->tf = &st_sensors_tf_spi; sdata->get_irq_data_ready = st_sensors_spi_get_irq; return 0; diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index 630c8cb35e8b..bed7b8682b17 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "st_sensors_core.h" @@ -26,8 +27,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, struct st_sensor_data *sdata) { - u8 status; - int ret; + int ret, status; /* How would I know if I can't check it? */ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) @@ -37,9 +37,9 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, if (!indio_dev->active_scan_mask) return 0; - ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sdata->sensor_settings->drdy_irq.stat_drdy.addr, - &status); + ret = regmap_read(sdata->regmap, + sdata->sensor_settings->drdy_irq.stat_drdy.addr, + &status); if (ret < 0) { dev_err(sdata->dev, "error checking samples available\n"); diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index 6e362f735e92..21360681d4dd 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c @@ -39,7 +39,8 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) int err; struct st_sensor_data *gdata = iio_priv(indio_dev); - gdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + gdata->buffer_data = kmalloc(indio_dev->scan_bytes, + GFP_DMA | GFP_KERNEL); if (gdata->buffer_data == NULL) { err = -ENOMEM; goto allocate_memory_error; diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 4b87e79aa744..02e42c945181 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -394,7 +393,6 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &gyro_info; - mutex_init(&gdata->tb.buf_lock); err = st_sensors_power_enable(indio_dev); if (err) @@ -405,7 +403,6 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) goto st_gyro_power_off; gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS; - gdata->multiread_bit = gdata->sensor_settings->multi_read_bit; indio_dev->channels = gdata->sensor_settings->ch; indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS; diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c index fa71e94b76f4..05a1a0874bd5 100644 --- 
a/drivers/iio/gyro/st_gyro_i2c.c +++ b/drivers/iio/gyro/st_gyro_i2c.c @@ -87,7 +87,9 @@ static int st_gyro_i2c_probe(struct i2c_client *client, gdata = iio_priv(indio_dev); gdata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_i2c_configure(indio_dev, client, gdata); + err = st_sensors_i2c_configure(indio_dev, client); + if (err < 0) + return err; err = st_gyro_common_probe(indio_dev); if (err < 0) diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index 11d7806655bc..9dba93539a99 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -34,7 +34,8 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev) int err; struct st_sensor_data *mdata = iio_priv(indio_dev); - mdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + mdata->buffer_data = kmalloc(indio_dev->scan_bytes, + GFP_DMA | GFP_KERNEL); if (mdata->buffer_data == NULL) { err = -ENOMEM; goto allocate_memory_error; diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 3f313aefece6..804353a483c7 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -496,7 +495,6 @@ int st_magn_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &magn_info; - mutex_init(&mdata->tb.buf_lock); err = st_sensors_power_enable(indio_dev); if (err) @@ -507,7 +505,6 @@ int st_magn_common_probe(struct iio_dev *indio_dev) goto st_magn_power_off; mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS; - mdata->multiread_bit = mdata->sensor_settings->multi_read_bit; indio_dev->channels = mdata->sensor_settings->ch; indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS; diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c index d5d565639bed..fdba480a12be 100644 --- a/drivers/iio/magnetometer/st_magn_i2c.c +++ b/drivers/iio/magnetometer/st_magn_i2c.c @@ -79,7 +79,9 @@ static int st_magn_i2c_probe(struct i2c_client *client, mdata = iio_priv(indio_dev); mdata->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_i2c_configure(indio_dev, client, mdata); + err = st_sensors_i2c_configure(indio_dev, client); + if (err < 0) + return err; err = st_magn_common_probe(indio_dev); if (err < 0) diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 4566e08a64a1..f21b630abaa0 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c @@ -39,7 +39,8 @@ static int st_press_buffer_postenable(struct iio_dev *indio_dev) int err; struct st_sensor_data *press_data = iio_priv(indio_dev); - press_data->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + press_data->buffer_data = kmalloc(indio_dev->scan_bytes, + GFP_DMA | GFP_KERNEL); if (press_data->buffer_data == NULL) { err = -ENOMEM; goto allocate_memory_error; diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index a783fc075c26..9ef92a501286 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -692,7 +691,6 @@ int st_press_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &press_info; - 
mutex_init(&press_data->tb.buf_lock); err = st_sensors_power_enable(indio_dev); if (err) @@ -709,7 +707,6 @@ int st_press_common_probe(struct iio_dev *indio_dev) * element. */ press_data->num_data_channels = press_data->sensor_settings->num_ch - 1; - press_data->multiread_bit = press_data->sensor_settings->multi_read_bit; indio_dev->channels = press_data->sensor_settings->ch; indio_dev->num_channels = press_data->sensor_settings->num_ch; diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c index 466e7dde5eae..71d2ed6b4948 100644 --- a/drivers/iio/pressure/st_pressure_i2c.c +++ b/drivers/iio/pressure/st_pressure_i2c.c @@ -112,7 +112,9 @@ static int st_press_i2c_probe(struct i2c_client *client, press_data = iio_priv(indio_dev); press_data->sensor_settings = (struct st_sensor_settings *)settings; - st_sensors_i2c_configure(indio_dev, client, press_data); + ret = st_sensors_i2c_configure(indio_dev, client); + if (ret < 0) + return ret; ret = st_press_common_probe(indio_dev); if (ret < 0) diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 566b955e2980..28fc1f9fa7d5 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -169,36 +170,6 @@ struct st_sensor_data_ready_irq { } ig1; }; -/** - * struct st_sensor_transfer_buffer - ST sensor device I/O buffer - * @buf_lock: Mutex to protect rx and tx buffers. - * @tx_buf: Buffer used by SPI transfer function to send data to the sensors. - * This buffer is used to avoid DMA not-aligned issue. - * @rx_buf: Buffer used by SPI transfer to receive data from sensors. - * This buffer is used to avoid DMA not-aligned issue. - */ -struct st_sensor_transfer_buffer { - struct mutex buf_lock; - u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH]; - u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned; -}; - -/** - * struct st_sensor_transfer_function - ST sensor device I/O function - * @read_byte: Function used to read one byte. - * @write_byte: Function used to write one byte. - * @read_multiple_byte: Function used to read multiple byte. - */ -struct st_sensor_transfer_function { - int (*read_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 *res_byte); - int (*write_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 data); - int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, int len, u8 *data, - bool multiread_bit); -}; - /** * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. @@ -242,16 +213,14 @@ struct st_sensor_settings { * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply + * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). - * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. * @buffer_data: Data used by buffer part. * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @get_irq_data_ready: Function to get the IRQ used for data ready signal. - * @tf: Transfer function structure used by I/O operations. - * @tb: Transfer buffers and mutex used by I/O operations. 
* @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. @@ -264,9 +233,9 @@ struct st_sensor_data { struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; + struct regmap *regmap; bool enabled; - bool multiread_bit; char *buffer_data; @@ -278,9 +247,6 @@ struct st_sensor_data { unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); - const struct st_sensor_transfer_function *tf; - struct st_sensor_transfer_buffer tb; - bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 5ada89944698..01e424e2af4f 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -14,8 +14,8 @@ #include #include -void st_sensors_i2c_configure(struct iio_dev *indio_dev, - struct i2c_client *client, struct st_sensor_data *sdata); +int st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client); #ifdef CONFIG_ACPI int st_sensors_match_acpi_device(struct device *dev); -- cgit v1.2.3-71-gd317 From dca39af8831e65adc13e0f9f8bdca3a746185072 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 23 Jul 2019 10:36:38 +0300 Subject: iio: imu: adis: Add support for SPI transfer cs_change_delay The ADIS16460 requires a higher delay before the next transfer. Since the SPI framework supports configuring the delay before the next transfer, this driver will become its first user. Supporting this in the ADIS16460 requires an addition to the ADIS lib to wire up the `cs_change_delay` functionality from the SPI subsystem.
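A chip variant opts in by filling the new field in its adis_data; each transfer making up a register access then carries the delay, expressed in microseconds via SPI_DELAY_UNIT_USECS. A sketch with made-up values (the real ADIS16460 numbers live in its driver, not in this patch):

	/* Hypothetical variant data: 16 us of chip-select de-assertion between
	 * the transfers of one register access. Register offsets are invented.
	 */
	static const struct adis_data example_adis_data = {
		.read_delay = 5,		/* us after each read transfer */
		.write_delay = 5,		/* us after each write transfer */
		.cs_change_delay = 16,		/* us between CS changes */
		.glob_cmd_reg = 0x3e,
		.msc_ctrl_reg = 0x32,
		.diag_stat_reg = 0x3c,
	};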
Signed-off-by: Michael Hennerich Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis.c | 12 ++++++++++++ include/linux/iio/imu/adis.h | 2 ++ 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index 30281e91dbf9..1631c255deab 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -39,18 +39,24 @@ int adis_write_reg(struct adis *adis, unsigned int reg, .len = 2, .cs_change = 1, .delay_usecs = adis->data->write_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 2, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = adis->data->write_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 4, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = adis->data->write_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 6, .bits_per_word = 8, @@ -133,12 +139,16 @@ int adis_read_reg(struct adis *adis, unsigned int reg, .len = 2, .cs_change = 1, .delay_usecs = adis->data->write_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 2, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = adis->data->read_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 4, .rx_buf = adis->rx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = adis->data->read_delay, + .cs_change_delay = adis->data->cs_change_delay, + .cs_change_delay_unit = SPI_DELAY_UNIT_USECS, }, { .rx_buf = adis->rx + 2, .bits_per_word = 8, diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 3428d06b2f44..4c53815bb729 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -26,6 +26,7 @@ struct adis_burst; * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us * @write_delay: SPI delay for write operations in us + * @cs_change_delay: SPI delay between CS changes in us * @glob_cmd_reg: Register address of the GLOB_CMD register * @msc_ctrl_reg: Register address of the MSC_CTRL register * @diag_stat_reg: Register address of the DIAG_STAT register @@ -35,6 +36,7 @@ struct adis_burst; struct adis_data { unsigned int read_delay; unsigned int write_delay; + unsigned int cs_change_delay; unsigned int glob_cmd_reg; unsigned int msc_ctrl_reg; -- cgit v1.2.3-71-gd317 From dca39af8831e65adc13e0f9f8bdca3a746185072 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Mon, 15 Jul 2019 16:14:51 -0700 Subject: iio: cros_ec: Add sign vector in core for backward compatibility To allow the cros_ec iio core library to be used with legacy devices, add a vector to rotate sensor data if necessary: legacy devices do not report data in the HTML5/Android sensor referential. Checked that the data is not rotated on recent chromebooks that use the HTML5 standard to present sensor data.
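The mechanism is minimal: the core initializes sign[] to all ones and the read path multiplies each sample by sign[i], so current devices are unaffected. As a sketch, a hypothetical legacy driver whose X axis is inverted relative to the HTML5/Android frame could flip it after core init (st is assumed to point at the embedded cros_ec_sensors_core_state):

	ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
	if (ret)
		return ret;

	/* Hypothetical legacy fixup: samples are multiplied by sign[i]. */
	st->sign[CROS_EC_SENSOR_X] = -1;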
Signed-off-by: Gwendal Grignou Reviewed-by: Douglas Anderson Signed-off-by: Jonathan Cameron --- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 4 ++++ include/linux/iio/common/cros_ec_sensors_core.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index d02660b1f0fe..3b491cf7be95 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -101,6 +101,9 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, } state->type = state->resp->info.type; state->loc = state->resp->info.location; + + /* Set sign vector, only used for backward compatibility. */ + memset(state->sign, 1, CROS_EC_SENSOR_MAX_AXIS); } return 0; @@ -303,6 +306,7 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev, if (ret < 0) return ret; + *data *= st->sign[i]; data++; } diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index bb03a252bd04..6f13c73bac3d 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -66,7 +66,7 @@ struct cros_ec_sensors_core_state { s16 offset; u16 scale; } calib[CROS_EC_SENSOR_MAX_AXIS]; - + s8 sign[CROS_EC_SENSOR_MAX_AXIS]; u8 samples[CROS_EC_SAMPLE_SIZE]; int (*read_ec_sensors_data)(struct iio_dev *indio_dev, -- cgit v1.2.3-71-gd317 From ae7b02ad2f32d39d1434655f346c04a16c1aa703 Mon Sep 17 00:00:00 2001 From: Fabien Lahoudere Date: Tue, 16 Jul 2019 11:11:06 +0200 Subject: iio: common: cros_ec_sensors: Expose cros_ec_sensors frequency range via iio sysfs The embedded controller returns minimum and maximum frequencies; unfortunately we have no way to know the step between the available frequencies. Even if the list is not complete, we can return the known values using the standard read_avail callback (IIO_CHAN_INFO_SAMP_FREQ) to provide them to userland. Now cros_ec_* sensors provide frequency values in sysfs like this: "0 min max". 0 is always valid and disables the sensor. Default frequencies are provided for earlier protocol versions.
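Each driver needs only two hooks, both visible in the hunks below: the channel advertises the list through info_mask_shared_by_all_available, and the iio_info structure gains the shared read_avail callback, which returns the three-entry frequencies[] table as an IIO_AVAIL_LIST of plain integers. Condensed from the diff:

	static const struct iio_info example_sensors_info = {
		.read_raw = &cros_ec_sensors_read,
		.write_raw = &cros_ec_sensors_write,
		.read_avail = &cros_ec_sensors_core_read_avail,	/* new hook */
	};

	/* per channel, at probe time: */
	channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ);

With the default accelerometer table, reading the *_sampling_frequency_available attribute should show "0 12500 100000", i.e. off, 12.5 Hz and 100 Hz, since the table is kept in mHz.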
Signed-off-by: Nick Vaccaro Signed-off-by: Fabien Lahoudere [rebased on top of iio/testing and solved conflicts] Signed-off-by: Enric Balletbo i Serra Signed-off-by: Jonathan Cameron --- .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 3 + .../common/cros_ec_sensors/cros_ec_sensors_core.c | 65 ++++++++++++++++++++++ drivers/iio/light/cros_ec_light_prox.c | 3 + include/linux/iio/common/cros_ec_sensors_core.h | 21 +++++++ 4 files changed, 92 insertions(+) (limited to 'include') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 2af09606c438..6a4919cd83c3 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -215,6 +215,7 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev, static const struct iio_info ec_sensors_info = { .read_raw = &cros_ec_sensors_read, .write_raw = &cros_ec_sensors_write, + .read_avail = &cros_ec_sensors_core_read_avail, }; static int cros_ec_sensors_probe(struct platform_device *pdev) @@ -252,6 +253,8 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_FREQUENCY) | BIT(IIO_CHAN_INFO_SAMP_FREQ); + channel->info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; channel->scan_type.storagebits = CROS_EC_SENSOR_BITS; channel->scan_index = i; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 3b491cf7be95..fd833295bb17 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -50,6 +50,37 @@ static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev, return ret; } +static void get_default_min_max_freq(enum motionsensor_type type, + u32 *min_freq, + u32 *max_freq) +{ + switch (type) { + case MOTIONSENSE_TYPE_ACCEL: + case MOTIONSENSE_TYPE_GYRO: + *min_freq = 12500; + *max_freq = 100000; + break; + case MOTIONSENSE_TYPE_MAG: + *min_freq = 5000; + *max_freq = 25000; + break; + case MOTIONSENSE_TYPE_PROX: + case MOTIONSENSE_TYPE_LIGHT: + *min_freq = 100; + *max_freq = 50000; + break; + case MOTIONSENSE_TYPE_BARO: + *min_freq = 250; + *max_freq = 20000; + break; + case MOTIONSENSE_TYPE_ACTIVITY: + default: + *min_freq = 0; + *max_freq = 0; + break; + } +} + int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device) @@ -104,6 +135,19 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, /* Set sign vector, only used for backward compatibility. 
*/ memset(state->sign, 1, CROS_EC_SENSOR_MAX_AXIS); + + /* 0 is a correct value used to stop the device */ + state->frequencies[0] = 0; + if (state->msg->version < 3) { + get_default_min_max_freq(state->resp->info.type, + &state->frequencies[1], + &state->frequencies[2]); + } else { + state->frequencies[1] = + state->resp->info_3.min_frequency; + state->frequencies[2] = + state->resp->info_3.max_frequency; + } } return 0; @@ -471,6 +515,27 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read); +int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask) +{ + struct cros_ec_sensors_core_state *state = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + *length = ARRAY_SIZE(state->frequencies); + *vals = (const int *)&state->frequencies; + *type = IIO_VAL_INT; + return IIO_AVAIL_LIST; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail); + int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask) diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 965f346d7d64..44313a009928 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -162,6 +162,7 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev, static const struct iio_info cros_ec_light_prox_info = { .read_raw = &cros_ec_light_prox_read, .write_raw = &cros_ec_light_prox_write, + .read_avail = &cros_ec_sensors_core_read_avail, }; static int cros_ec_light_prox_probe(struct platform_device *pdev) @@ -196,6 +197,8 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) channel->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_FREQUENCY); + channel->info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; channel->scan_type.storagebits = CROS_EC_SENSOR_BITS; channel->scan_type.shift = 0; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 6f13c73bac3d..ea1f50ce2e49 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -73,6 +73,9 @@ struct cros_ec_sensors_core_state { unsigned long scan_mask, s16 *data); int curr_sampl_freq; + + /* Table of known available frequencies : 0, Min and Max in mHz */ + int frequencies[3]; }; /** @@ -153,6 +156,24 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask); +/** + * cros_ec_sensors_core_read_avail() - get available values + * @indio_dev: pointer to state information for device + * @chan: channel specification structure table + * @vals: list of available values + * @type: type of data returned + * @length: number of data returned in the array + * @mask: specifies which values to be requested + * + * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST + */ +int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + /** * cros_ec_sensors_core_write() - function to write a value to the sensor * @st: pointer to state information for device -- cgit v1.2.3-71-gd317 From 9cd15d521a3adcb687a0f9a312e32caaa94f44c2 Mon 
Sep 17 00:00:00 2001 From: Denis Ciocca Date: Fri, 2 Aug 2019 10:59:13 -0700 Subject: iio: remove get_irq_data_ready() function pointer and use IRQ number directly Not even sure why it was there since the beginning. Just use the IRQ number in the sensor_data struct. Signed-off-by: Denis Ciocca Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_core.c | 7 +++---- drivers/iio/common/st_sensors/st_sensors_i2c.c | 9 +-------- drivers/iio/common/st_sensors/st_sensors_spi.c | 9 +-------- drivers/iio/common/st_sensors/st_sensors_trigger.c | 21 ++++++++++----------- drivers/iio/gyro/st_gyro_core.c | 7 +++---- drivers/iio/magnetometer/st_magn_core.c | 7 +++---- drivers/iio/pressure/st_pressure_core.c | 7 +++---- include/linux/iio/common/st_sensors.h | 5 ++--- 8 files changed, 26 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 0b17004cb12e..2e37f8a6d8cf 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -1169,7 +1169,6 @@ int st_accel_common_probe(struct iio_dev *indio_dev) struct st_sensor_data *adata = iio_priv(indio_dev); struct st_sensors_platform_data *pdata = (struct st_sensors_platform_data *)adata->dev->platform_data; - int irq = adata->get_irq_data_ready(indio_dev); struct iio_chan_spec *channels; size_t channels_size; int err; @@ -1217,7 +1216,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) if (err < 0) goto st_accel_power_off; - if (irq > 0) { + if (adata->irq > 0) { err = st_sensors_allocate_trigger(indio_dev, ST_ACCEL_TRIGGER_OPS); if (err < 0) @@ -1234,7 +1233,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) return 0; st_accel_device_register_error: - if (irq > 0) + if (adata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_accel_probe_trigger_error: st_accel_deallocate_ring(indio_dev); @@ -1252,7 +1251,7 @@ void st_accel_common_remove(struct iio_dev *indio_dev) st_sensors_power_disable(indio_dev); iio_device_unregister(indio_dev); - if (adata->get_irq_data_ready(indio_dev) > 0) + if (adata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_accel_deallocate_ring(indio_dev); diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c index 9240625534df..aa89d54a7c59 100644 --- a/drivers/iio/common/st_sensors/st_sensors_i2c.c +++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c @@ -20,13 +20,6 @@ #define ST_SENSORS_I2C_MULTIREAD 0x80 -static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev) -{ - struct st_sensor_data *sdata = iio_priv(indio_dev); - - return to_i2c_client(sdata->dev)->irq; -} - static const struct regmap_config st_sensors_i2c_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -69,7 +62,7 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev, indio_dev->name = client->name; sdata->dev = &client->dev; - sdata->get_irq_data_ready = st_sensors_i2c_get_irq; + sdata->irq = client->irq; return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c index 9c0661a283d0..2262f81b07c2 100644 --- a/drivers/iio/common/st_sensors/st_sensors_spi.c +++ b/drivers/iio/common/st_sensors/st_sensors_spi.c @@ -18,13 +18,6 @@ #define ST_SENSORS_SPI_MULTIREAD 0xc0 -static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev) -{ - struct st_sensor_data *sdata = iio_priv(indio_dev); - - return to_spi_device(sdata->dev)->irq; -} - static const struct regmap_config st_sensors_spi_regmap_config = {
.reg_bits = 8, .val_bits = 8, @@ -117,7 +110,7 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev, indio_dev->name = spi->modalias; sdata->dev = &spi->dev; - sdata->get_irq_data_ready = st_sensors_spi_get_irq; + sdata->irq = spi->irq; return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index bed7b8682b17..4a2efa00f7f2 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -121,9 +121,9 @@ static irqreturn_t st_sensors_irq_thread(int irq, void *p) int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) { - int err, irq; struct st_sensor_data *sdata = iio_priv(indio_dev); unsigned long irq_trig; + int err; sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name); if (sdata->trig == NULL) { @@ -135,8 +135,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->trig->ops = trigger_ops; sdata->trig->dev.parent = sdata->dev; - irq = sdata->get_irq_data_ready(indio_dev); - irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + irq_trig = irqd_get_trigger_type(irq_get_irq_data(sdata->irq)); /* * If the IRQ is triggered on falling edge, we need to mark the * interrupt as active low, if the hardware supports this. @@ -206,12 +205,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->sensor_settings->drdy_irq.stat_drdy.addr) irq_trig |= IRQF_SHARED; - err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), - st_sensors_irq_handler, - st_sensors_irq_thread, - irq_trig, - sdata->trig->name, - sdata->trig); + err = request_threaded_irq(sdata->irq, + st_sensors_irq_handler, + st_sensors_irq_thread, + irq_trig, + sdata->trig->name, + sdata->trig); if (err) { dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n"); goto iio_trigger_free; @@ -227,7 +226,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, return 0; iio_trigger_register_error: - free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig); + free_irq(sdata->irq, sdata->trig); iio_trigger_free: iio_trigger_free(sdata->trig); return err; @@ -239,7 +238,7 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) struct st_sensor_data *sdata = iio_priv(indio_dev); iio_trigger_unregister(sdata->trig); - free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig); + free_irq(sdata->irq, sdata->trig); iio_trigger_free(sdata->trig); } EXPORT_SYMBOL(st_sensors_deallocate_trigger); diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 02e42c945181..c0acbb5d2ffb 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -388,7 +388,6 @@ EXPORT_SYMBOL(st_gyro_get_settings); int st_gyro_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *gdata = iio_priv(indio_dev); - int irq = gdata->get_irq_data_ready(indio_dev); int err; indio_dev->modes = INDIO_DIRECT_MODE; @@ -419,7 +418,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) if (err < 0) goto st_gyro_power_off; - if (irq > 0) { + if (gdata->irq > 0) { err = st_sensors_allocate_trigger(indio_dev, ST_GYRO_TRIGGER_OPS); if (err < 0) @@ -436,7 +435,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) return 0; st_gyro_device_register_error: - if (irq > 0) + if (gdata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_gyro_probe_trigger_error: st_gyro_deallocate_ring(indio_dev); @@ -454,7 +453,7 @@ void st_gyro_common_remove(struct iio_dev *indio_dev) 
st_sensors_power_disable(indio_dev); iio_device_unregister(indio_dev); - if (gdata->get_irq_data_ready(indio_dev) > 0) + if (gdata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_gyro_deallocate_ring(indio_dev); diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 804353a483c7..a3a268ee2896 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -490,7 +490,6 @@ EXPORT_SYMBOL(st_magn_get_settings); int st_magn_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *mdata = iio_priv(indio_dev); - int irq = mdata->get_irq_data_ready(indio_dev); int err; indio_dev->modes = INDIO_DIRECT_MODE; @@ -520,7 +519,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev) if (err < 0) goto st_magn_power_off; - if (irq > 0) { + if (mdata->irq > 0) { err = st_sensors_allocate_trigger(indio_dev, ST_MAGN_TRIGGER_OPS); if (err < 0) @@ -537,7 +536,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev) return 0; st_magn_device_register_error: - if (irq > 0) + if (mdata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_magn_probe_trigger_error: st_magn_deallocate_ring(indio_dev); @@ -555,7 +554,7 @@ void st_magn_common_remove(struct iio_dev *indio_dev) st_sensors_power_disable(indio_dev); iio_device_unregister(indio_dev); - if (mdata->get_irq_data_ready(indio_dev) > 0) + if (mdata->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_magn_deallocate_ring(indio_dev); diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 9ef92a501286..ca6863b32a5f 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -686,7 +686,6 @@ int st_press_common_probe(struct iio_dev *indio_dev) struct st_sensor_data *press_data = iio_priv(indio_dev); struct st_sensors_platform_data *pdata = (struct st_sensors_platform_data *)press_data->dev->platform_data; - int irq = press_data->get_irq_data_ready(indio_dev); int err; indio_dev->modes = INDIO_DIRECT_MODE; @@ -729,7 +728,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) if (err < 0) goto st_press_power_off; - if (irq > 0) { + if (press_data->irq > 0) { err = st_sensors_allocate_trigger(indio_dev, ST_PRESS_TRIGGER_OPS); if (err < 0) @@ -746,7 +745,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) return err; st_press_device_register_error: - if (irq > 0) + if (press_data->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_press_probe_trigger_error: st_press_deallocate_ring(indio_dev); @@ -764,7 +763,7 @@ void st_press_common_remove(struct iio_dev *indio_dev) st_sensors_power_disable(indio_dev); iio_device_unregister(indio_dev); - if (press_data->get_irq_data_ready(indio_dev) > 0) + if (press_data->irq > 0) st_sensors_deallocate_trigger(indio_dev); st_press_deallocate_ring(indio_dev); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 28fc1f9fa7d5..4d0889bf1c6c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -220,7 +220,7 @@ struct st_sensor_settings { * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. - * @get_irq_data_ready: Function to get the IRQ used for data ready signal. + * @irq: the IRQ number. * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. 
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use. @@ -244,8 +244,7 @@ struct st_sensor_data { u8 drdy_int_pin; bool int_pin_open_drain; - - unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); + int irq; bool edge_irq; bool hw_irq_trigger; -- cgit v1.2.3-71-gd317 From e031d5f558f1535968df1ecae8d734b84c17f78d Mon Sep 17 00:00:00 2001 From: Denis Ciocca Date: Mon, 5 Aug 2019 11:57:11 -0700 Subject: iio:st_sensors: remove buffer allocation at each buffer enable This patch removes the buffer allocation performed at each buffer enable. Instead, enough memory to cover the maximum size needed is allocated in the main structure during probe; that maximum is small anyway: ALIGN(2 bytes x 3 channels, 8) plus an 8-byte timestamp, i.e. 16 bytes. Signed-off-by: Denis Ciocca Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_buffer.c | 12 +----------- drivers/iio/gyro/st_gyro_buffer.c | 12 +----------- drivers/iio/magnetometer/st_magn_buffer.c | 12 +----------- drivers/iio/pressure/st_pressure_buffer.c | 12 +----------- include/linux/iio/common/st_sensors.h | 14 +++++++++----- 5 files changed, 13 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index 59dcef02ec19..9f2b40474b8e 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c @@ -31,17 +31,11 @@ int st_accel_trig_set_state(struct iio_trigger *trig, bool state) static int st_accel_buffer_postenable(struct iio_dev *indio_dev) { - struct st_sensor_data *adata = iio_priv(indio_dev); int err; - adata->buffer_data = kmalloc(indio_dev->scan_bytes, - GFP_DMA | GFP_KERNEL); - if (!adata->buffer_data) - return -ENOMEM; - err = iio_triggered_buffer_postenable(indio_dev); if (err < 0) - goto st_accel_free_buffer; + return err; err = st_sensors_set_axis_enable(indio_dev, (u8)indio_dev->active_scan_mask[0]); @@ -58,14 +52,11 @@ st_accel_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); st_accel_buffer_predisable: iio_triggered_buffer_predisable(indio_dev); -st_accel_free_buffer: - kfree(adata->buffer_data); return err; } static int st_accel_buffer_predisable(struct iio_dev *indio_dev) { - struct st_sensor_data *adata = iio_priv(indio_dev); int err, err2; err = st_sensors_set_enable(indio_dev, false); @@ -79,7 +70,6 @@ st_accel_buffer_predisable: if (!err) err = err2; - kfree(adata->buffer_data); return err; } diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index c6ddfecc1fc3..7465ad62391c 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c @@ -31,17 +31,11 @@ int st_gyro_trig_set_state(struct iio_trigger *trig, bool state) static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) { - struct st_sensor_data *gdata = iio_priv(indio_dev); int err; - gdata->buffer_data = kmalloc(indio_dev->scan_bytes, - GFP_DMA | GFP_KERNEL); - if (!gdata->buffer_data) - return -ENOMEM; - err = iio_triggered_buffer_postenable(indio_dev); if (err < 0) - goto st_gyro_free_buffer; + return err; err = st_sensors_set_axis_enable(indio_dev, (u8)indio_dev->active_scan_mask[0]); @@ -58,15 +52,12 @@ st_gyro_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); st_gyro_buffer_predisable: iio_triggered_buffer_predisable(indio_dev); -st_gyro_free_buffer: - kfree(gdata->buffer_data); return err; } static int st_gyro_buffer_predisable(struct iio_dev *indio_dev) { int err, err2; - struct st_sensor_data *gdata = iio_priv(indio_dev); err =
st_sensors_set_enable(indio_dev, false); if (err < 0) @@ -79,7 +70,6 @@ st_gyro_buffer_predisable: if (!err) err = err2; - kfree(gdata->buffer_data); return err; } diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index 658d627dad8e..bb425c167a96 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -31,17 +31,11 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state) static int st_magn_buffer_postenable(struct iio_dev *indio_dev) { - struct st_sensor_data *mdata = iio_priv(indio_dev); int err; - mdata->buffer_data = kmalloc(indio_dev->scan_bytes, - GFP_DMA | GFP_KERNEL); - if (!mdata->buffer_data) - return -ENOMEM; - err = iio_triggered_buffer_postenable(indio_dev); if (err < 0) - goto st_magn_free_buffer; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -51,14 +45,11 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev) st_magn_buffer_predisable: iio_triggered_buffer_predisable(indio_dev); -st_magn_free_buffer: - kfree(mdata->buffer_data); return err; } static int st_magn_buffer_predisable(struct iio_dev *indio_dev) { - struct st_sensor_data *mdata = iio_priv(indio_dev); int err, err2; err = st_sensors_set_enable(indio_dev, false); @@ -67,7 +58,6 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev) if (!err) err = err2; - kfree(mdata->buffer_data); return err; } diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 77cb2d862f19..418dbf9e6e1e 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c @@ -31,17 +31,11 @@ int st_press_trig_set_state(struct iio_trigger *trig, bool state) static int st_press_buffer_postenable(struct iio_dev *indio_dev) { - struct st_sensor_data *press_data = iio_priv(indio_dev); int err; - press_data->buffer_data = kmalloc(indio_dev->scan_bytes, - GFP_DMA | GFP_KERNEL); - if (!press_data->buffer_data) - return -ENOMEM; - err = iio_triggered_buffer_postenable(indio_dev); if (err < 0) - goto st_press_free_buffer; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -51,14 +45,11 @@ static int st_press_buffer_postenable(struct iio_dev *indio_dev) st_press_buffer_predisable: iio_triggered_buffer_predisable(indio_dev); -st_press_free_buffer: - kfree(press_data->buffer_data); return err; } static int st_press_buffer_predisable(struct iio_dev *indio_dev) { - struct st_sensor_data *press_data = iio_priv(indio_dev); int err, err2; err = st_sensors_set_enable(indio_dev, false); @@ -67,7 +58,6 @@ static int st_press_buffer_predisable(struct iio_dev *indio_dev) if (!err) err = err2; - kfree(press_data->buffer_data); return err; } diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 4d0889bf1c6c..686be532f4cb 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -20,8 +20,12 @@ #include -#define ST_SENSORS_TX_MAX_LENGTH 2 -#define ST_SENSORS_RX_MAX_LENGTH 6 +/* + * Buffer size max case: 2bytes per channel, 3 channels in total + + * 8bytes timestamp channel (s64) + */ +#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \ + sizeof(s64)) #define ST_SENSORS_ODR_LIST_MAX 10 #define ST_SENSORS_FULLSCALE_AVL_MAX 10 @@ -215,7 +219,6 @@ struct st_sensor_settings { * @vdd_io: Pointer to sensor's Vdd-IO power supply * @regmap: Pointer to specific sensor regmap configuration. 
* @enabled: Status of the sensor (false->off, true->on). - * @buffer_data: Data used by buffer part. * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). @@ -224,6 +227,7 @@ struct st_sensor_settings { * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. + * @buffer_data: Data used by buffer part. */ struct st_sensor_data { struct device *dev; @@ -237,8 +241,6 @@ struct st_sensor_data { bool enabled; - char *buffer_data; - unsigned int odr; unsigned int num_data_channels; @@ -249,6 +251,8 @@ struct st_sensor_data { bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; + + char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; }; #ifdef CONFIG_IIO_BUFFER -- cgit v1.2.3-71-gd317 From 47e4937a4a7ca4184fd282791dfee76c6799966a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 23 Aug 2019 05:36:59 +0800 Subject: erofs: move erofs out of staging The EROFS filesystem has been merged into linux-staging for a year. EROFS is designed to be a better solution for saving extra storage space with guaranteed end-to-end performance for read-only files, with the help of reduced metadata, fixed-sized output compression and in-place decompression technologies. In the past year, EROFS was greatly improved by many people as a staging driver, self-tested, beta-tested by a large number of our internal users, successfully deployed on almost all in-service HUAWEI smartphones as part of EMUI 9.1, and proven stable enough to be moved out of staging. EROFS is a self-contained filesystem driver. Although there are still some TODOs to make it more generic, we have a dedicated team actively working on EROFS to keep improving it as the Linux kernel evolves, like the other in-kernel filesystems. As Pavel suggested, it's better to do this as one commit, since git can track the moves and all history will be preserved this way. Let's promote it from staging and enhance it more actively as a "real" part of the kernel for wider scenarios! Cc: Greg Kroah-Hartman Cc: Alexander Viro Cc: Andrew Morton Cc: Stephen Rothwell Cc: Theodore Ts'o Cc: Pavel Machek Cc: David Sterba Cc: Amir Goldstein Cc: Christoph Hellwig Cc: Darrick J.
Wong Cc: Dave Chinner Cc: Jaegeuk Kim Cc: Jan Kara Cc: Richard Weinberger Cc: Linus Torvalds Cc: Chao Yu Cc: Miao Xie Cc: Li Guifu Cc: Fang Wei Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20190822213659.5501-1-hsiangkao@aol.com Signed-off-by: Greg Kroah-Hartman --- Documentation/filesystems/erofs.txt | 219 +++ MAINTAINERS | 14 +- drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - .../erofs/Documentation/filesystems/erofs.txt | 223 --- drivers/staging/erofs/Kconfig | 98 -- drivers/staging/erofs/Makefile | 13 - drivers/staging/erofs/TODO | 46 - drivers/staging/erofs/compress.h | 62 - drivers/staging/erofs/data.c | 425 ------ drivers/staging/erofs/decompressor.c | 360 ----- drivers/staging/erofs/dir.c | 141 -- drivers/staging/erofs/erofs_fs.h | 310 ----- drivers/staging/erofs/include/trace/events/erofs.h | 256 ---- drivers/staging/erofs/inode.c | 334 ----- drivers/staging/erofs/internal.h | 554 -------- drivers/staging/erofs/namei.c | 253 ---- drivers/staging/erofs/super.c | 671 --------- drivers/staging/erofs/tagptr.h | 110 -- drivers/staging/erofs/utils.c | 335 ----- drivers/staging/erofs/xattr.c | 705 ---------- drivers/staging/erofs/xattr.h | 94 -- drivers/staging/erofs/zdata.c | 1434 -------------------- drivers/staging/erofs/zdata.h | 195 --- drivers/staging/erofs/zmap.c | 468 ------- drivers/staging/erofs/zpvec.h | 159 --- fs/Kconfig | 1 + fs/Makefile | 1 + fs/erofs/Kconfig | 98 ++ fs/erofs/Makefile | 11 + fs/erofs/compress.h | 60 + fs/erofs/data.c | 423 ++++++ fs/erofs/decompressor.c | 358 +++++ fs/erofs/dir.c | 139 ++ fs/erofs/erofs_fs.h | 307 +++++ fs/erofs/inode.c | 332 +++++ fs/erofs/internal.h | 553 ++++++++ fs/erofs/namei.c | 251 ++++ fs/erofs/super.c | 669 +++++++++ fs/erofs/tagptr.h | 110 ++ fs/erofs/utils.c | 333 +++++ fs/erofs/xattr.c | 703 ++++++++++ fs/erofs/xattr.h | 92 ++ fs/erofs/zdata.c | 1432 +++++++++++++++++++ fs/erofs/zdata.h | 193 +++ fs/erofs/zmap.c | 466 +++++++ fs/erofs/zpvec.h | 157 +++ include/trace/events/erofs.h | 256 ++++ include/uapi/linux/magic.h | 1 + 49 files changed, 7172 insertions(+), 7256 deletions(-) create mode 100644 Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Kconfig delete mode 100644 drivers/staging/erofs/Makefile delete mode 100644 drivers/staging/erofs/TODO delete mode 100644 drivers/staging/erofs/compress.h delete mode 100644 drivers/staging/erofs/data.c delete mode 100644 drivers/staging/erofs/decompressor.c delete mode 100644 drivers/staging/erofs/dir.c delete mode 100644 drivers/staging/erofs/erofs_fs.h delete mode 100644 drivers/staging/erofs/include/trace/events/erofs.h delete mode 100644 drivers/staging/erofs/inode.c delete mode 100644 drivers/staging/erofs/internal.h delete mode 100644 drivers/staging/erofs/namei.c delete mode 100644 drivers/staging/erofs/super.c delete mode 100644 drivers/staging/erofs/tagptr.h delete mode 100644 drivers/staging/erofs/utils.c delete mode 100644 drivers/staging/erofs/xattr.c delete mode 100644 drivers/staging/erofs/xattr.h delete mode 100644 drivers/staging/erofs/zdata.c delete mode 100644 drivers/staging/erofs/zdata.h delete mode 100644 drivers/staging/erofs/zmap.c delete mode 100644 drivers/staging/erofs/zpvec.h create mode 100644 fs/erofs/Kconfig create mode 100644 fs/erofs/Makefile create mode 100644 fs/erofs/compress.h create mode 100644 fs/erofs/data.c create mode 100644 fs/erofs/decompressor.c create mode 100644 fs/erofs/dir.c create mode 100644 
fs/erofs/erofs_fs.h create mode 100644 fs/erofs/inode.c create mode 100644 fs/erofs/internal.h create mode 100644 fs/erofs/namei.c create mode 100644 fs/erofs/super.c create mode 100644 fs/erofs/tagptr.h create mode 100644 fs/erofs/utils.c create mode 100644 fs/erofs/xattr.c create mode 100644 fs/erofs/xattr.h create mode 100644 fs/erofs/zdata.c create mode 100644 fs/erofs/zdata.h create mode 100644 fs/erofs/zmap.c create mode 100644 fs/erofs/zpvec.h create mode 100644 include/trace/events/erofs.h (limited to 'include') diff --git a/Documentation/filesystems/erofs.txt b/Documentation/filesystems/erofs.txt new file mode 100644 index 000000000000..38aa9126ec98 --- /dev/null +++ b/Documentation/filesystems/erofs.txt @@ -0,0 +1,219 @@ +Overview +======== + +EROFS file-system stands for Enhanced Read-Only File System. Different +from other read-only file systems, it aims to be designed for flexibility and +scalability, while being kept simple and high-performance. + +It is designed as a better filesystem solution for the following scenarios: + - read-only storage media or + + - part of a fully trusted read-only solution, which means it needs to be + immutable and bit-for-bit identical to the official golden image for + their releases due to security and other considerations and + + - hope to save some extra storage space with guaranteed end-to-end performance + by using reduced metadata and transparent file compression, especially + for those embedded devices with limited memory (ex, smartphone); + +Here are the main features of EROFS: + - Little endian on-disk design; + + - Currently 4KB block size (nobh) and therefore maximum 16TB address space; + + - Metadata & data could be mixed by design; + + - 2 inode versions for different requirements: + v1 v2 + Inode metadata size: 32 bytes 64 bytes + Max file size: 4 GB 16 EB (also limited by max. vol size) + Max uids/gids: 65536 4294967296 + File creation time: no yes (64 + 32-bit timestamp) + Max hardlinks: 65536 4294967296 + Metadata reserved: 4 bytes 14 bytes + + - Support extended attributes (xattrs) as an option; + + - Support xattr inline and tail-end data inline for all files; + + - Support POSIX.1e ACLs by using xattrs; + + - Support transparent file compression as an option: + LZ4 algorithm with 4 KB fixed-output compression for high performance; + +The following git tree provides the file system user-space tools under +development (ex, formatting tool mkfs.erofs): +>> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git + +Bugs and patches are welcome; please kindly help us and send them to the following +linux-erofs mailing list: +>> linux-erofs mailing list + +Mount options +============= + +fault_injection=%d Enable fault injection in all supported types with + specified injection rate. Supported injection type: + Type_Name Type_Value + FAULT_KMALLOC 0x000000001 + FAULT_READ_IO 0x000000002 +(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled + by default if CONFIG_EROFS_FS_XATTR is selected. +(no)acl Setup POSIX Access Control List. Note: acl is enabled + by default if CONFIG_EROFS_FS_POSIX_ACL is selected. +cache_strategy=%s Select a strategy for cached decompression from now on: + disabled: In-place I/O decompression only; + readahead: Cache the last incomplete compressed physical + cluster for further reading. It still does + in-place I/O decompression for the rest + compressed physical clusters; + readaround: Cache both ends of incomplete compressed + physical clusters for further reading.
+ It still does in-place I/O decompression + for the rest compressed physical clusters. + +Module parameters +================= +use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). + +On-disk details +=============== + +Summary +------- +Different from other read-only file systems, an EROFS volume is designed +to be as simple as possible: + + |-> aligned with the block size + ____________________________________________________________ + | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | + |_|__|_|_____|__________|_____|______|__________|_____|______| + 0 +1K + +All data areas should be aligned with the block size, but metadata areas +may not. All metadatas can be now observed in two different spaces (views): + 1. Inode metadata space + Each valid inode should be aligned with an inode slot, which is a fixed + value (32 bytes) and designed to be kept in line with v1 inode size. + + Each inode can be directly found with the following formula: + inode offset = meta_blkaddr * block_size + 32 * nid + + |-> aligned with 8B + |-> followed closely + + meta_blkaddr blocks |-> another slot + _____________________________________________________________________ + | ... | inode | xattrs | extents | data inline | ... | inode ... + |________|_______|(optional)|(optional)|__(optional)_|_____|__________ + |-> aligned with the inode slot size + . . + . . + . . + . . + . . + . . + .____________________________________________________|-> aligned with 4B + | xattr_ibody_header | shared xattrs | inline xattrs | + |____________________|_______________|_______________| + |-> 12 bytes <-|->x * 4 bytes<-| . + . . . + . . . + . . . + ._______________________________.______________________. + | id | id | id | id | ... | id | ent | ... | ent| ... | + |____|____|____|____|______|____|_____|_____|____|_____| + |-> aligned with 4B + |-> aligned with 4B + + Inode could be 32 or 64 bytes, which can be distinguished from a common + field which all inode versions have -- i_advise: + + __________________ __________________ + | i_advise | | i_advise | + |__________________| |__________________| + | ... | | ... | + | | | | + |__________________| 32 bytes | | + | | + |__________________| 64 bytes + + Xattrs, extents, data inline are followed by the corresponding inode with + proper alignes, and they could be optional for different data mappings, + _currently_ there are totally 3 valid data mappings supported: + + 1) flat file data without data inline (no extent); + 2) fixed-output size data compression (must have extents); + 3) flat file data with tail-end data inline (no extent); + + The size of the optional xattrs is indicated by i_xattr_count in inode + header. Large xattrs or xattrs shared by many different files can be + stored in shared xattrs metadata rather than inlined right after inode. + + 2. Shared xattrs metadata space + Shared xattrs space is similar to the above inode space, started with + a specific block indicated by xattr_blkaddr, organized one by one with + proper align. + + Each share xattr can also be directly found by the following formula: + xattr offset = xattr_blkaddr * block_size + 4 * xattr_id + + |-> aligned by 4 bytes + + xattr_blkaddr blocks |-> aligned with 4 bytes + _________________________________________________________________________ + | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... 
+ |________|_____________|_____________|_____|______________|_______________ + +Directories +----------- +All directories are now organized in a compact on-disk format. Note that +each directory block is divided into index and name areas in order to support +random file lookup, and all directory entries are _strictly_ recorded in +alphabetical order in order to support improved prefix binary search +algorithm (could refer to the related source code). + + ___________________________ + / | + / ______________|________________ + / / | nameoff1 | nameoffN-1 + ____________.______________._______________v________________v__________ +| dirent | dirent | ... | dirent | filename | filename | ... | filename | +|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| + \ ^ + \ | * could have + \ | trailing '\0' + \________________________| nameoff0 + + Directory block + +Note that apart from the offset of the first filename, nameoff0 also indicates +the total number of directory entries in this block since it is no need to +introduce another on-disk field at all. + +Compression +----------- +Currently, EROFS supports 4KB fixed-output clustersize transparent file +compression, as illustrated below: + + |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- + clusterofs clusterofs clusterofs + | | | logical data +_________v_______________________________v_____________________v_______________ +... | . | | . | | . | ... +____|____.________|_____________|________.____|_____________|__.__________|____ + |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| + size size size size size + . . . . + . . . . + . . . . + _______._____________._____________._____________._____________________ + ... | | | | ... physical data + _______|_____________|_____________|_____________|_____________________ + |-> cluster <-|-> cluster <-|-> cluster <-| + size size size + +Currently each on-disk physical cluster can contain 4KB (un)compressed data +at most. For each logical cluster, there is a corresponding on-disk index to +describe its cluster type, physical cluster address, etc. + +See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. 
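The two addressing formulas in the documentation above are mechanical enough to be worth a few lines of C. Here is a minimal sketch, not kernel code: erofs_iloc() and erofs_xattr_loc() are hypothetical helper names, and the currently fixed 4KB block size from the feature list is assumed:

#include <stdint.h>

#define EROFS_BLKSIZ      4096 /* currently fixed 4KB block size */
#define EROFS_ISLOT_SIZE  32   /* inode slot size, kept in line with the v1 inode */
#define EROFS_XATTR_ALIGN 4    /* shared xattrs are indexed in 4-byte units */

/* inode offset = meta_blkaddr * block_size + 32 * nid */
static inline uint64_t erofs_iloc(uint32_t meta_blkaddr, uint64_t nid)
{
	return (uint64_t)meta_blkaddr * EROFS_BLKSIZ + EROFS_ISLOT_SIZE * nid;
}

/* xattr offset = xattr_blkaddr * block_size + 4 * xattr_id */
static inline uint64_t erofs_xattr_loc(uint32_t xattr_blkaddr, uint32_t xattr_id)
{
	return (uint64_t)xattr_blkaddr * EROFS_BLKSIZ +
	       (uint64_t)EROFS_XATTR_ALIGN * xattr_id;
}

For example, with meta_blkaddr = 2 and nid = 36, erofs_iloc() yields 2 * 4096 + 32 * 36 = 9344, which falls on a 32-byte slot boundary as the layout requires.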
+ diff --git a/MAINTAINERS b/MAINTAINERS index 6847372cfab8..0f38cba2c581 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6046,6 +6046,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: drivers/video/fbdev/s1d13xxxfb.c F: include/video/s1d13xxxfb.h +EROFS FILE SYSTEM +M: Gao Xiang +M: Chao Yu +L: linux-erofs@lists.ozlabs.org +S: Maintained +F: fs/erofs/ + ERRSEQ ERROR TRACKING INFRASTRUCTURE M: Jeff Layton S: Maintained @@ -15229,13 +15236,6 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ -STAGING - EROFS FILE SYSTEM -M: Gao Xiang -M: Chao Yu -L: linux-erofs@lists.ozlabs.org -S: Maintained -F: drivers/staging/erofs/ - STAGING - FIELDBUS SUBSYSTEM M: Sven Van Asbroeck S: Maintained diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 7c96a01eef6c..d972ec8e71fb 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -112,8 +112,6 @@ source "drivers/staging/gasket/Kconfig" source "drivers/staging/axis-fifo/Kconfig" -source "drivers/staging/erofs/Kconfig" - source "drivers/staging/fieldbus/Kconfig" source "drivers/staging/kpc2000/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index fcaac9693b83..6018b9a4a077 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -46,7 +46,6 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ -obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ obj-$(CONFIG_ISDN_CAPI) += isdn/ diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt deleted file mode 100644 index 0eab600ca7ca..000000000000 --- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt +++ /dev/null @@ -1,223 +0,0 @@ -Overview -======== - -EROFS file-system stands for Enhanced Read-Only File System. Different -from other read-only file systems, it aims to be designed for flexibility, -scalability, but be kept simple and high performance. - -It is designed as a better filesystem solution for the following scenarios: - - read-only storage media or - - - part of a fully trusted read-only solution, which means it needs to be - immutable and bit-for-bit identical to the official golden image for - their releases due to security and other considerations and - - - hope to save some extra storage space with guaranteed end-to-end performance - by using reduced metadata and transparent file compression, especially - for those embedded devices with limited memory (ex, smartphone); - -Here is the main features of EROFS: - - Little endian on-disk design; - - - Currently 4KB block size (nobh) and therefore maximum 16TB address space; - - - Metadata & data could be mixed by design; - - - 2 inode versions for different requirements: - v1 v2 - Inode metadata size: 32 bytes 64 bytes - Max file size: 4 GB 16 EB (also limited by max. 
vol size) - Max uids/gids: 65536 4294967296 - File creation time: no yes (64 + 32-bit timestamp) - Max hardlinks: 65536 4294967296 - Metadata reserved: 4 bytes 14 bytes - - - Support extended attributes (xattrs) as an option; - - - Support xattr inline and tail-end data inline for all files; - - - Support POSIX.1e ACLs by using xattrs; - - - Support transparent file compression as an option: - LZ4 algorithm with 4 KB fixed-output compression for high performance; - -The following git tree provides the file system user-space tools under -development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -Bugs and patches are welcome, please kindly help us and send to the following -linux-erofs mailing list: ->> linux-erofs mailing list - -Note that EROFS is still working in progress as a Linux staging driver, -Cc the staging mailing list as well is highly recommended: ->> Linux Driver Project Developer List - -Mount options -============= - -fault_injection=%d Enable fault injection in all supported types with - specified injection rate. Supported injection type: - Type_Name Type_Value - FAULT_KMALLOC 0x000000001 - FAULT_READ_IO 0x000000002 -(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled - by default if CONFIG_EROFS_FS_XATTR is selected. -(no)acl Setup POSIX Access Control List. Note: acl is enabled - by default if CONFIG_EROFS_FS_POSIX_ACL is selected. -cache_strategy=%s Select a strategy for cached decompression from now on: - disabled: In-place I/O decompression only; - readahead: Cache the last incomplete compressed physical - cluster for further reading. It still does - in-place I/O decompression for the rest - compressed physical clusters; - readaround: Cache the both ends of incomplete compressed - physical clusters for further reading. - It still does in-place I/O decompression - for the rest compressed physical clusters. - -Module parameters -================= -use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). - -On-disk details -=============== - -Summary -------- -Different from other read-only file systems, an EROFS volume is designed -to be as simple as possible: - - |-> aligned with the block size - ____________________________________________________________ - | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | - |_|__|_|_____|__________|_____|______|__________|_____|______| - 0 +1K - -All data areas should be aligned with the block size, but metadata areas -may not. All metadatas can be now observed in two different spaces (views): - 1. Inode metadata space - Each valid inode should be aligned with an inode slot, which is a fixed - value (32 bytes) and designed to be kept in line with v1 inode size. - - Each inode can be directly found with the following formula: - inode offset = meta_blkaddr * block_size + 32 * nid - - |-> aligned with 8B - |-> followed closely - + meta_blkaddr blocks |-> another slot - _____________________________________________________________________ - | ... | inode | xattrs | extents | data inline | ... | inode ... - |________|_______|(optional)|(optional)|__(optional)_|_____|__________ - |-> aligned with the inode slot size - . . - . . - . . - . . - . . - . . - .____________________________________________________|-> aligned with 4B - | xattr_ibody_header | shared xattrs | inline xattrs | - |____________________|_______________|_______________| - |-> 12 bytes <-|->x * 4 bytes<-| . - . . . - . . . - . . . 
- ._______________________________.______________________. - | id | id | id | id | ... | id | ent | ... | ent| ... | - |____|____|____|____|______|____|_____|_____|____|_____| - |-> aligned with 4B - |-> aligned with 4B - - Inode could be 32 or 64 bytes, which can be distinguished from a common - field which all inode versions have -- i_advise: - - __________________ __________________ - | i_advise | | i_advise | - |__________________| |__________________| - | ... | | ... | - | | | | - |__________________| 32 bytes | | - | | - |__________________| 64 bytes - - Xattrs, extents, data inline are followed by the corresponding inode with - proper alignes, and they could be optional for different data mappings, - _currently_ there are totally 3 valid data mappings supported: - - 1) flat file data without data inline (no extent); - 2) fixed-output size data compression (must have extents); - 3) flat file data with tail-end data inline (no extent); - - The size of the optional xattrs is indicated by i_xattr_count in inode - header. Large xattrs or xattrs shared by many different files can be - stored in shared xattrs metadata rather than inlined right after inode. - - 2. Shared xattrs metadata space - Shared xattrs space is similar to the above inode space, started with - a specific block indicated by xattr_blkaddr, organized one by one with - proper align. - - Each share xattr can also be directly found by the following formula: - xattr offset = xattr_blkaddr * block_size + 4 * xattr_id - - |-> aligned by 4 bytes - + xattr_blkaddr blocks |-> aligned with 4 bytes - _________________________________________________________________________ - | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... - |________|_____________|_____________|_____|______________|_______________ - -Directories ------------ -All directories are now organized in a compact on-disk format. Note that -each directory block is divided into index and name areas in order to support -random file lookup, and all directory entries are _strictly_ recorded in -alphabetical order in order to support improved prefix binary search -algorithm (could refer to the related source code). - - ___________________________ - / | - / ______________|________________ - / / | nameoff1 | nameoffN-1 - ____________.______________._______________v________________v__________ -| dirent | dirent | ... | dirent | filename | filename | ... | filename | -|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| - \ ^ - \ | * could have - \ | trailing '\0' - \________________________| nameoff0 - - Directory block - -Note that apart from the offset of the first filename, nameoff0 also indicates -the total number of directory entries in this block since it is no need to -introduce another on-disk field at all. - -Compression ------------ -Currently, EROFS supports 4KB fixed-output clustersize transparent file -compression, as illustrated below: - - |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- - clusterofs clusterofs clusterofs - | | | logical data -_________v_______________________________v_____________________v_______________ -... | . | | . | | . | ... -____|____.________|_____________|________.____|_____________|__.__________|____ - |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| - size size size size size - . . . . - . . . . - . . . . - _______._____________._____________._____________._____________________ - ... | | | | ... 
physical data - _______|_____________|_____________|_____________|_____________________ - |-> cluster <-|-> cluster <-|-> cluster <-| - size size size - -Currently each on-disk physical cluster can contain 4KB (un)compressed data -at most. For each logical cluster, there is a corresponding on-disk index to -describe its cluster type, physical cluster address, etc. - -See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. - diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig deleted file mode 100644 index 16316d1adca3..000000000000 --- a/drivers/staging/erofs/Kconfig +++ /dev/null @@ -1,98 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config EROFS_FS - tristate "EROFS filesystem support" - depends on BLOCK - help - EROFS (Enhanced Read-Only File System) is a lightweight - read-only file system with modern designs (eg. page-sized - blocks, inline xattrs/data, etc.) for scenarios which need - high-performance read-only requirements, e.g. Android OS - for mobile phones and LIVECDs. - - It also provides fixed-sized output compression support, - which improves storage density, keeps relatively higher - compression ratios, which is more useful to achieve high - performance for embedded devices with limited memory. - - If unsure, say N. - -config EROFS_FS_DEBUG - bool "EROFS debugging feature" - depends on EROFS_FS - help - Print debugging messages and enable more BUG_ONs which check - filesystem consistency and find potential issues aggressively, - which can be used for Android eng build, for example. - - For daily use, say N. - -config EROFS_FAULT_INJECTION - bool "EROFS fault injection facility" - depends on EROFS_FS - help - Test EROFS to inject faults such as ENOMEM, EIO, and so on. - If unsure, say N. - -config EROFS_FS_XATTR - bool "EROFS extended attributes" - depends on EROFS_FS - default y - help - Extended attributes are name:value pairs associated with inodes by - the kernel or by users (see the attr(5) manual page, or visit - for details). - - If unsure, say N. - -config EROFS_FS_POSIX_ACL - bool "EROFS Access Control Lists" - depends on EROFS_FS_XATTR - select FS_POSIX_ACL - default y - help - Posix Access Control Lists (ACLs) support permissions for users and - groups beyond the owner/group/world scheme. - - To learn more about Access Control Lists, visit the POSIX ACLs for - Linux website . - - If you don't know what Access Control Lists are, say N. - -config EROFS_FS_SECURITY - bool "EROFS Security Labels" - depends on EROFS_FS_XATTR - default y - help - Security labels provide an access control facility to support Linux - Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO - Linux. This option enables an extended attribute handler for file - security labels in the erofs filesystem, so that it requires enabling - the extended attribute support in advance. - - If you are not using a security module, say N. - -config EROFS_FS_ZIP - bool "EROFS Data Compression Support" - depends on EROFS_FS - select LZ4_DECOMPRESS - default y - help - Enable fixed-sized output compression for EROFS. - - If you don't want to enable compression feature, say N. - -config EROFS_FS_CLUSTER_PAGE_LIMIT - int "EROFS Cluster Pages Hard Limit" - depends on EROFS_FS_ZIP - range 1 256 - default "1" - help - Indicates maximum # of pages of a compressed - physical cluster. - - For example, if files in a image were compressed - into 8k-unit, hard limit should not be configured - less than 2. 
Otherwise, the image will be refused - to mount on this kernel. - diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile deleted file mode 100644 index 5cdae21cb5af..000000000000 --- a/drivers/staging/erofs/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -EROFS_VERSION = "1.0pre1" - -ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\" - -obj-$(CONFIG_EROFS_FS) += erofs.o -# staging requirement: to be self-contained in its own directory -ccflags-y += -I $(srctree)/$(src)/include -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o -erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o - diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO deleted file mode 100644 index a8608b2f72bd..000000000000 --- a/drivers/staging/erofs/TODO +++ /dev/null @@ -1,46 +0,0 @@ - -EROFS is still working in progress, thus it is not suitable -for all productive uses. play at your own risk :) - -TODO List: - - add the missing error handling code - (mainly existed in xattr and decompression submodules); - - - finalize erofs ondisk format design (which means that - minor on-disk revisions could happen later); - - - documentation and detailed technical analysis; - - - general code review and clean up - (including confusing variable names and code snippets); - - - support larger compressed clustersizes for selection - (currently erofs only works as expected with the page-sized - compressed cluster configuration, usually 4KB); - - - support more lossless data compression algorithms - in addition to LZ4 algorithms in VLE approach; - - - data deduplication and other useful features. - -The following git tree provides the file system user-space -tools under development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -The open-source development of erofs-utils is at the early stage. -Contact the original author Li Guifu and -the co-maintainer Fang Wei for the latest news -and more details. - -Code, suggestions, etc, are welcome. Please feel free to -ask and send patches, - -To: - linux-erofs mailing list - Gao Xiang - Chao Yu - -Cc: (for linux-kernel upstream patches) - Greg Kroah-Hartman - linux-staging mailing list - diff --git a/drivers/staging/erofs/compress.h b/drivers/staging/erofs/compress.h deleted file mode 100644 index 043013f9ef1b..000000000000 --- a/drivers/staging/erofs/compress.h +++ /dev/null @@ -1,62 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/compress.h - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_COMPRESS_H -#define __EROFS_FS_COMPRESS_H - -#include "internal.h" - -enum { - Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX, - Z_EROFS_COMPRESSION_RUNTIME_MAX -}; - -struct z_erofs_decompress_req { - struct super_block *sb; - struct page **in, **out; - - unsigned short pageofs_out; - unsigned int inputsize, outputsize; - - /* indicate the algorithm will be used for decompression */ - unsigned int alg; - bool inplace_io, partial_decoding; -}; - -/* - * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) - - * used to mark temporary allocated pages from other - * file/cached pages and NULL mapping pages. 
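- * (the sentinel is stored in page->mapping, which is how z_erofs_page_is_staging() below tells such bounce pages apart so they can be recycled through the pagepool)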
- */ -#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D) - -/* check if a page is marked as staging */ -static inline bool z_erofs_page_is_staging(struct page *page) -{ - return page->mapping == Z_EROFS_MAPPING_STAGING; -} - -static inline bool z_erofs_put_stagingpage(struct list_head *pagepool, - struct page *page) -{ - if (!z_erofs_page_is_staging(page)) - return false; - - /* staging pages should not be used by others at the same time */ - if (page_ref_count(page) > 1) - put_page(page); - else - list_add(&page->lru, pagepool); - return true; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - -#endif - diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c deleted file mode 100644 index 72c4b4c5296b..000000000000 --- a/drivers/staging/erofs/data.c +++ /dev/null @@ -1,425 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/data.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -#include - -static inline void read_endio(struct bio *bio) -{ - struct super_block *const sb = bio->bi_private; - struct bio_vec *bvec; - blk_status_t err = bio->bi_status; - struct bvec_iter_all iter_all; - - if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - - /* page is already locked */ - DBG_BUGON(PageUptodate(page)); - - if (unlikely(err)) - SetPageError(page); - else - SetPageUptodate(page); - - unlock_page(page); - /* page could be reclaimed now */ - } - bio_put(bio); -} - -/* prio -- true is used for dir */ -struct page *__erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio, bool nofail) -{ - struct inode *const bd_inode = sb->s_bdev->bd_inode; - struct address_space *const mapping = bd_inode->i_mapping; - /* prefer retrying in the allocator to blindly looping below */ - const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) | - (nofail ? __GFP_NOFAIL : 0); - unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0; - struct page *page; - int err; - -repeat: - page = find_or_create_page(mapping, blkaddr, gfp); - if (unlikely(!page)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - DBG_BUGON(!PageLocked(page)); - - if (!PageUptodate(page)) { - struct bio *bio; - - bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail); - if (IS_ERR(bio)) { - DBG_BUGON(nofail); - err = PTR_ERR(bio); - goto err_out; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (unlikely(err != PAGE_SIZE)) { - err = -EFAULT; - goto err_out; - } - - __submit_bio(bio, REQ_OP_READ, - REQ_META | (prio ? 
REQ_PRIO : 0)); - - lock_page(page); - - /* this page has been truncated by others */ - if (unlikely(page->mapping != mapping)) { -unlock_repeat: - unlock_page(page); - put_page(page); - goto repeat; - } - - /* more likely a read error */ - if (unlikely(!PageUptodate(page))) { - if (io_retries) { - --io_retries; - goto unlock_repeat; - } - err = -EIO; - goto err_out; - } - } - return page; - -err_out: - unlock_page(page); - put_page(page); - return ERR_PTR(err); -} - -static int erofs_map_blocks_flatmode(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - int err = 0; - erofs_blk_t nblocks, lastblk; - u64 offset = map->m_la; - struct erofs_vnode *vi = EROFS_V(inode); - - trace_erofs_map_blocks_flatmode_enter(inode, map, flags); - - nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); - lastblk = nblocks - is_inode_flat_inline(inode); - - if (unlikely(offset >= inode->i_size)) { - /* leave out-of-bound access unmapped */ - map->m_flags = 0; - map->m_plen = 0; - goto out; - } - - /* there is no hole in flatmode */ - map->m_flags = EROFS_MAP_MAPPED; - - if (offset < blknr_to_addr(lastblk)) { - map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; - map->m_plen = blknr_to_addr(lastblk) - offset; - } else if (is_inode_flat_inline(inode)) { - /* 2 - inode inline B: inode, [xattrs], inline last blk... */ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + - vi->xattr_isize + erofs_blkoff(map->m_la); - map->m_plen = inode->i_size - offset; - - /* inline data should be located in one meta block */ - if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - err = -EFSCORRUPTED; - goto err_out; - } - - map->m_flags |= EROFS_MAP_META; - } else { - errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", - vi->nid, inode->i_size, map->m_la); - DBG_BUGON(1); - err = -EIO; - goto err_out; - } - -out: - map->m_llen = map->m_plen; - -err_out: - trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); - return err; -} - -int erofs_map_blocks(struct inode *inode, - struct erofs_map_blocks *map, int flags) -{ - if (unlikely(is_inode_layout_compression(inode))) { - int err = z_erofs_map_blocks_iter(inode, map, flags); - - if (map->mpage) { - put_page(map->mpage); - map->mpage = NULL; - } - return err; - } - return erofs_map_blocks_flatmode(inode, map, flags); -} - -static inline struct bio *erofs_read_raw_page(struct bio *bio, - struct address_space *mapping, - struct page *page, - erofs_off_t *last_block, - unsigned int nblocks, - bool ra) -{ - struct inode *const inode = mapping->host; - struct super_block *const sb = inode->i_sb; - erofs_off_t current_block = (erofs_off_t)page->index; - int err; - - DBG_BUGON(!nblocks); - - if (PageUptodate(page)) { - err = 0; - goto has_updated; - } - - /* note that for readpage case, bio also equals to NULL */ - if (bio && - /* not continuous */ - *last_block + 1 != current_block) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - struct erofs_map_blocks map = { - .m_la = blknr_to_addr(current_block), - }; - erofs_blk_t blknr; - unsigned int blkoff; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (unlikely(err)) - goto err_out; - - /* zero out the holed page */ - if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, 0, PAGE_SIZE); - SetPageUptodate(page); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - 
- /* for RAW access mode, m_plen must be equal to m_llen */ - DBG_BUGON(map.m_plen != map.m_llen); - - blknr = erofs_blknr(map.m_pa); - blkoff = erofs_blkoff(map.m_pa); - - /* deal with inline page */ - if (map.m_flags & EROFS_MAP_META) { - void *vsrc, *vto; - struct page *ipage; - - DBG_BUGON(map.m_plen > PAGE_SIZE); - - ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); - - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); - goto err_out; - } - - vsrc = kmap_atomic(ipage); - vto = kmap_atomic(page); - memcpy(vto, vsrc + blkoff, map.m_plen); - memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); - kunmap_atomic(vto); - kunmap_atomic(vsrc); - flush_dcache_page(page); - - SetPageUptodate(page); - /* TODO: could we unlock the page earlier? */ - unlock_page(ipage); - put_page(ipage); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - - /* pa must be block-aligned for raw reading */ - DBG_BUGON(erofs_blkoff(map.m_pa)); - - /* max # of continuous pages */ - if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) - nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); - if (nblocks > BIO_MAX_PAGES) - nblocks = BIO_MAX_PAGES; - - bio = erofs_grab_bio(sb, blknr, nblocks, sb, - read_endio, false); - if (IS_ERR(bio)) { - err = PTR_ERR(bio); - bio = NULL; - goto err_out; - } - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - /* out of the extent or bio is full */ - if (err < PAGE_SIZE) - goto submit_bio_retry; - - *last_block = current_block; - - /* shift in advance in case of it followed by too many gaps */ - if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { - /* err should reassign to 0 after submitting */ - err = 0; - goto submit_bio_out; - } - - return bio; - -err_out: - /* for sync reading, set page error immediately */ - if (!ra) { - SetPageError(page); - ClearPageUptodate(page); - } -has_updated: - unlock_page(page); - - /* if updated manually, continuous pages has a gap */ - if (bio) -submit_bio_out: - __submit_bio(bio, REQ_OP_READ, 0); - - return unlikely(err) ? ERR_PTR(err) : NULL; -} - -/* - * since we dont have write or truncate flows, so no inode - * locking needs to be held at the moment. 
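- * (accordingly, erofs_raw_access_aops below only provides readpage, readpages and bmap)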
- */ -static int erofs_raw_access_readpage(struct file *file, struct page *page) -{ - erofs_off_t last_block; - struct bio *bio; - - trace_erofs_readpage(page, true); - - bio = erofs_read_raw_page(NULL, page->mapping, - page, &last_block, 1, false); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ - return 0; -} - -static int erofs_raw_access_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - erofs_off_t last_block; - struct bio *bio = NULL; - gfp_t gfp = readahead_gfp_mask(mapping); - struct page *page = list_last_entry(pages, struct page, lru); - - trace_erofs_readpages(mapping->host, page, nr_pages, true); - - for (; nr_pages; --nr_pages) { - page = list_entry(pages->prev, struct page, lru); - - prefetchw(&page->flags); - list_del(&page->lru); - - if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { - bio = erofs_read_raw_page(bio, mapping, page, - &last_block, nr_pages, true); - - /* all the page errors are ignored when readahead */ - if (IS_ERR(bio)) { - pr_err("%s, readahead error at page %lu of nid %llu\n", - __func__, page->index, - EROFS_V(mapping->host)->nid); - - bio = NULL; - } - } - - /* pages could still be locked */ - put_page(page); - } - DBG_BUGON(!list_empty(pages)); - - /* the rare case (end in gaps) */ - if (unlikely(bio)) - __submit_bio(bio, REQ_OP_READ, 0); - return 0; -} - -static int erofs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - struct erofs_map_blocks map = { - .m_la = iblock << 9, - }; - int err; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (err) - return err; - - if (map.m_flags & EROFS_MAP_MAPPED) - bh->b_blocknr = erofs_blknr(map.m_pa); - - return err; -} - -static sector_t erofs_bmap(struct address_space *mapping, sector_t block) -{ - struct inode *inode = mapping->host; - - if (is_inode_flat_inline(inode)) { - erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; - - if (block >> LOG_SECTORS_PER_BLOCK >= blks) - return 0; - } - - return generic_block_bmap(mapping, block, erofs_get_block); -} - -/* for uncompressed (aligned) files and raw access for other files */ -const struct address_space_operations erofs_raw_access_aops = { - .readpage = erofs_raw_access_readpage, - .readpages = erofs_raw_access_readpages, - .bmap = erofs_bmap, -}; - diff --git a/drivers/staging/erofs/decompressor.c b/drivers/staging/erofs/decompressor.c deleted file mode 100644 index 32a811ac704a..000000000000 --- a/drivers/staging/erofs/decompressor.c +++ /dev/null @@ -1,360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/decompressor.c - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "compress.h" -#include -#include - -#ifndef LZ4_DISTANCE_MAX /* history window size */ -#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1) -#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN -#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) -#endif - -struct z_erofs_decompressor { - /* - * if destpages have sparsed pages, fill them with bounce pages. - * it also check whether destpages indicate continuous physical memory. 
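- * (prepare_destpages() below returns a negative errno on failure, 1 when the output pages are physically contiguous and 0 otherwise)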
- */ - int (*prepare_destpages)(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out); - char *name; -}; - -static bool use_vmap; -module_param(use_vmap, bool, 0444); -MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)"); - -static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nr = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL }; - unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES, - BITS_PER_LONG)] = { 0 }; - void *kaddr = NULL; - unsigned int i, j, top; - - top = 0; - for (i = j = 0; i < nr; ++i, ++j) { - struct page *const page = rq->out[i]; - struct page *victim; - - if (j >= LZ4_MAX_DISTANCE_PAGES) - j = 0; - - /* 'valid' bounced can only be tested after a complete round */ - if (test_bit(j, bounced)) { - DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES); - DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES); - availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; - } - - if (page) { - __clear_bit(j, bounced); - if (kaddr) { - if (kaddr + PAGE_SIZE == page_address(page)) - kaddr += PAGE_SIZE; - else - kaddr = NULL; - } else if (!i) { - kaddr = page_address(page); - } - continue; - } - kaddr = NULL; - __set_bit(j, bounced); - - if (top) { - victim = availables[--top]; - get_page(victim); - } else { - victim = erofs_allocpage(pagepool, GFP_KERNEL, false); - if (unlikely(!victim)) - return -ENOMEM; - victim->mapping = Z_EROFS_MAPPING_STAGING; - } - rq->out[i] = victim; - } - return kaddr ? 1 : 0; -} - -static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, - u8 *src, unsigned int pageofs_in) -{ - /* - * if in-place decompression is ongoing, those decompressed - * pages should be copied in order to avoid being overlapped. 
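The staging described in this comment is the crux of in-place decompression: if the output pages alias the input page, the raw data must be copied aside before the decoder starts consuming it. A standalone sketch of that decision, with an explicit range-overlap test standing in for the page-level checks done here (names and the pointer comparison are illustrative only):

#include <stddef.h>
#include <string.h>

/*
 * sketch: decompress from 'src' unless [src, src+len) intersects the
 * destination range, in which case read from a scratch copy instead
 */
static const unsigned char *stage_input(const unsigned char *src, size_t len,
					unsigned char *scratch,
					const unsigned char *dst_lo,
					const unsigned char *dst_hi)
{
	if (src < dst_hi && src + len > dst_lo) {	/* ranges overlap */
		memcpy(scratch, src, len);
		return scratch;		/* decoder reads the safe copy */
	}
	return src;			/* no aliasing: read in place */
}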
- */ - struct page **in = rq->in; - u8 *const tmp = erofs_get_pcpubuf(0); - u8 *tmpp = tmp; - unsigned int inlen = rq->inputsize - pageofs_in; - unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); - - while (tmpp < tmp + inlen) { - if (!src) - src = kmap_atomic(*in); - memcpy(tmpp, src + pageofs_in, count); - kunmap_atomic(src); - src = NULL; - tmpp += count; - pageofs_in = 0; - count = PAGE_SIZE; - ++in; - } - return tmp; -} - -static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) -{ - unsigned int inputmargin, inlen; - u8 *src; - bool copied, support_0padding; - int ret; - - if (rq->inputsize > PAGE_SIZE) - return -EOPNOTSUPP; - - src = kmap_atomic(*rq->in); - inputmargin = 0; - support_0padding = false; - - /* decompression inplace is only safe when 0padding is enabled */ - if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { - support_0padding = true; - - while (!src[inputmargin & ~PAGE_MASK]) - if (!(++inputmargin & ~PAGE_MASK)) - break; - - if (inputmargin >= rq->inputsize) { - kunmap_atomic(src); - return -EIO; - } - } - - copied = false; - inlen = rq->inputsize - inputmargin; - if (rq->inplace_io) { - const uint oend = (rq->pageofs_out + - rq->outputsize) & ~PAGE_MASK; - const uint nr = PAGE_ALIGN(rq->pageofs_out + - rq->outputsize) >> PAGE_SHIFT; - - if (rq->partial_decoding || !support_0padding || - rq->out[nr - 1] != rq->in[0] || - rq->inputsize - oend < - LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { - src = generic_copy_inplace_data(rq, src, inputmargin); - inputmargin = 0; - copied = true; - } - } - - ret = LZ4_decompress_safe_partial(src + inputmargin, out, - inlen, rq->outputsize, - rq->outputsize); - if (ret < 0) { - errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", - __func__, src + inputmargin, inlen, inputmargin, - out, rq->outputsize); - WARN_ON(1); - print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, inlen, true); - print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, - 16, 1, out, rq->outputsize, true); - ret = -EIO; - } - - if (copied) - erofs_put_pcpubuf(src); - else - kunmap_atomic(src); - return ret; -} - -static struct z_erofs_decompressor decompressors[] = { - [Z_EROFS_COMPRESSION_SHIFTED] = { - .name = "shifted" - }, - [Z_EROFS_COMPRESSION_LZ4] = { - .prepare_destpages = lz4_prepare_destpages, - .decompress = lz4_decompress, - .name = "lz4" - }, -}; - -static void copy_from_pcpubuf(struct page **out, const char *dst, - unsigned short pageofs_out, - unsigned int outputsize) -{ - const char *end = dst + outputsize; - const unsigned int righthalf = PAGE_SIZE - pageofs_out; - const char *cur = dst - pageofs_out; - - while (cur < end) { - struct page *const page = *out++; - - if (page) { - char *buf = kmap_atomic(page); - - if (cur >= dst) { - memcpy(buf, cur, min_t(uint, PAGE_SIZE, - end - cur)); - } else { - memcpy(buf + pageofs_out, cur + pageofs_out, - min_t(uint, righthalf, end - cur)); - } - kunmap_atomic(buf); - } - cur += PAGE_SIZE; - } -} - -static void *erofs_vmap(struct page **pages, unsigned int count) -{ - int i = 0; - - if (use_vmap) - return vmap(pages, count, VM_MAP, PAGE_KERNEL); - - while (1) { - void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); - - /* retry two more times (totally 3 times) */ - if (addr || ++i >= 3) - return addr; - vm_unmap_aliases(); - } - return NULL; -} - -static void erofs_vunmap(const void *mem, unsigned int count) -{ - if (!use_vmap) - vm_unmap_ram(mem, count); - else - vunmap(mem); -} - -static int decompress_generic(struct 
z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const struct z_erofs_decompressor *alg = decompressors + rq->alg; - unsigned int dst_maptype; - void *dst; - int ret; - - if (nrpages_out == 1 && !rq->inplace_io) { - DBG_BUGON(!*rq->out); - dst = kmap_atomic(*rq->out); - dst_maptype = 0; - goto dstmap_out; - } - - /* - * For the case of small output size (especially much less - * than PAGE_SIZE), memcpy the decompressed data rather than - * compressed data is preferred. - */ - if (rq->outputsize <= PAGE_SIZE * 7 / 8) { - dst = erofs_get_pcpubuf(0); - if (IS_ERR(dst)) - return PTR_ERR(dst); - - rq->inplace_io = false; - ret = alg->decompress(rq, dst); - if (!ret) - copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, - rq->outputsize); - - erofs_put_pcpubuf(dst); - return ret; - } - - ret = alg->prepare_destpages(rq, pagepool); - if (ret < 0) { - return ret; - } else if (ret) { - dst = page_address(*rq->out); - dst_maptype = 1; - goto dstmap_out; - } - - dst = erofs_vmap(rq->out, nrpages_out); - if (!dst) - return -ENOMEM; - dst_maptype = 2; - -dstmap_out: - ret = alg->decompress(rq, dst + rq->pageofs_out); - - if (!dst_maptype) - kunmap_atomic(dst); - else if (dst_maptype == 2) - erofs_vunmap(dst, nrpages_out); - return ret; -} - -static int shifted_decompress(const struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; - unsigned char *src, *dst; - - if (nrpages_out > 2) { - DBG_BUGON(1); - return -EIO; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(nrpages_out != 1); - return 0; - } - - src = kmap_atomic(*rq->in); - if (!rq->out[0]) { - dst = NULL; - } else { - dst = kmap_atomic(rq->out[0]); - memcpy(dst + rq->pageofs_out, src, righthalf); - } - - if (rq->out[1] == *rq->in) { - memmove(src, src + righthalf, rq->pageofs_out); - } else if (nrpages_out == 2) { - if (dst) - kunmap_atomic(dst); - DBG_BUGON(!rq->out[1]); - dst = kmap_atomic(rq->out[1]); - memcpy(dst, src + righthalf, rq->pageofs_out); - } - if (dst) - kunmap_atomic(dst); - kunmap_atomic(src); - return 0; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) - return shifted_decompress(rq, pagepool); - return decompress_generic(rq, pagepool); -} - diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c deleted file mode 100644 index 77ef856df9f3..000000000000 --- a/drivers/staging/erofs/dir.c +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/dir.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
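Stepping back to decompress_generic() above: it exposes the scattered output pages as one linear buffer in whichever way is cheapest. A compact restatement of that policy -- the 7/8-of-a-page threshold and the three strategies are taken from the code above, 4096 stands in for PAGE_SIZE, and the names are otherwise illustrative:

enum dst_maptype { DST_KMAP, DST_PCPUBUF, DST_VMAP };

static enum dst_maptype pick_maptype(unsigned int nrpages_out,
				     unsigned int outputsize, int inplace_io)
{
	if (nrpages_out == 1 && !inplace_io)
		return DST_KMAP;	/* single page: map it directly */
	if (outputsize <= 4096 * 7 / 8)
		return DST_PCPUBUF;	/* decompress to a per-CPU buffer,
					 * then memcpy into the pages */
	return DST_VMAP;		/* build a virtually-contiguous window */
}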
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" - -static void debug_one_dentry(unsigned char d_type, const char *de_name, - unsigned int de_namelen) -{ -#ifdef CONFIG_EROFS_FS_DEBUG - /* since the on-disk name could not have the trailing '\0' */ - unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; - - memcpy(dbg_namebuf, de_name, de_namelen); - dbg_namebuf[de_namelen] = '\0'; - - debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, - de_namelen, d_type); -#endif -} - -static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, - void *dentry_blk, unsigned int *ofs, - unsigned int nameoff, unsigned int maxsize) -{ - struct erofs_dirent *de = dentry_blk + *ofs; - const struct erofs_dirent *end = dentry_blk + nameoff; - - while (de < end) { - const char *de_name; - unsigned int de_namelen; - unsigned char d_type; - - d_type = fs_ftype_to_dtype(de->file_type); - - nameoff = le16_to_cpu(de->nameoff); - de_name = (char *)dentry_blk + nameoff; - - /* the last dirent in the block? */ - if (de + 1 >= end) - de_namelen = strnlen(de_name, maxsize - nameoff); - else - de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; - - /* a corrupted entry is found */ - if (unlikely(nameoff + de_namelen > maxsize || - de_namelen > EROFS_NAME_LEN)) { - errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - debug_one_dentry(d_type, de_name, de_namelen); - if (!dir_emit(ctx, de_name, de_namelen, - le64_to_cpu(de->nid), d_type)) - /* stopped by some reason */ - return 1; - ++de; - *ofs += sizeof(struct erofs_dirent); - } - *ofs = maxsize; - return 0; -} - -static int erofs_readdir(struct file *f, struct dir_context *ctx) -{ - struct inode *dir = file_inode(f); - struct address_space *mapping = dir->i_mapping; - const size_t dirsize = i_size_read(dir); - unsigned int i = ctx->pos / EROFS_BLKSIZ; - unsigned int ofs = ctx->pos % EROFS_BLKSIZ; - int err = 0; - bool initial = true; - - while (ctx->pos < dirsize) { - struct page *dentry_page; - struct erofs_dirent *de; - unsigned int nameoff, maxsize; - - dentry_page = read_mapping_page(mapping, i, NULL); - if (dentry_page == ERR_PTR(-ENOMEM)) { - err = -ENOMEM; - break; - } else if (IS_ERR(dentry_page)) { - errln("fail to readdir of logical block %u of nid %llu", - i, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - break; - } - - de = (struct erofs_dirent *)kmap(dentry_page); - - nameoff = le16_to_cpu(de->nameoff); - - if (unlikely(nameoff < sizeof(struct erofs_dirent) || - nameoff >= PAGE_SIZE)) { - errln("%s, invalid de[0].nameoff %u @ nid %llu", - __func__, nameoff, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - goto skip_this; - } - - maxsize = min_t(unsigned int, - dirsize - ctx->pos + ofs, PAGE_SIZE); - - /* search dirents at the arbitrary position */ - if (unlikely(initial)) { - initial = false; - - ofs = roundup(ofs, sizeof(struct erofs_dirent)); - if (unlikely(ofs >= nameoff)) - goto skip_this; - } - - err = erofs_fill_dentries(dir, ctx, de, &ofs, - nameoff, maxsize); -skip_this: - kunmap(dentry_page); - - put_page(dentry_page); - - ctx->pos = blknr_to_addr(i) + ofs; - - if (unlikely(err)) - break; - ++i; - ofs = 0; - } - return err < 0 ? 
err : 0; -} - -const struct file_operations erofs_dir_fops = { - .llseek = generic_file_llseek, - .read = generic_read_dir, - .iterate_shared = erofs_readdir, -}; - diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h deleted file mode 100644 index 6db70f395937..000000000000 --- a/drivers/staging/erofs/erofs_fs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ -/* - * linux/drivers/staging/erofs/erofs_fs.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_H -#define __EROFS_FS_H - -/* Enhanced(Extended) ROM File System */ -#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 -#define EROFS_SUPER_OFFSET 1024 - -/* - * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be - * incompatible with this kernel version. - */ -#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 -#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING - -struct erofs_super_block { -/* 0 */__le32 magic; /* in the little endian */ -/* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; /* (aka. feature_compat) */ -/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ -/* 13 */__u8 reserved; - -/* 14 */__le16 root_nid; -/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ - -/* 24 */__le64 build_time; /* inode v1 time derivation */ -/* 32 */__le32 build_time_nsec; -/* 36 */__le32 blocks; /* used for statfs */ -/* 40 */__le32 meta_blkaddr; -/* 44 */__le32 xattr_blkaddr; -/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ -/* 64 */__u8 volume_name[16]; /* volume name */ -/* 80 */__le32 requirements; /* (aka. feature_incompat) */ - -/* 84 */__u8 reserved2[44]; -} __packed; /* 128 bytes */ - -/* - * erofs inode data mapping: - * 0 - inode plain without inline data A: - * inode, [xattrs], ... | ... | no-holed data - * 1 - inode VLE compression B (legacy): - * inode, [xattrs], extents ... | ... - * 2 - inode plain with inline data C: - * inode, [xattrs], last_inline_data, ... | ... | no-holed data - * 3 - inode compression D: - * inode, [xattrs], map_header, extents ... | ... 
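As an aside before the remaining reserved modes below: the superblock layout defined above is easy to sanity-check from userspace. EROFS_SUPER_OFFSET, the magic value and the byte offset of blkszbits come straight from those definitions; treating the on-disk __le32 as a host integer assumes a little-endian machine, as the sketch notes:

#include <stdint.h>
#include <string.h>

#define EROFS_SUPER_OFFSET	1024
#define EROFS_MAGIC		0xE0F5E1E2u

/* return the block size of an erofs image held in 'img', or -1 */
static int check_super(const uint8_t *img)
{
	uint32_t magic;
	uint8_t blkszbits;

	memcpy(&magic, img + EROFS_SUPER_OFFSET, sizeof(magic)); /* __le32 magic */
	blkszbits = img[EROFS_SUPER_OFFSET + 12];		 /* __u8 blkszbits */

	if (magic != EROFS_MAGIC)	/* assumes a little-endian host */
		return -1;
	return 1 << blkszbits;		/* e.g. 4096 */
}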
- * 4~7 - reserved - */ -enum { - EROFS_INODE_FLAT_PLAIN, - EROFS_INODE_FLAT_COMPRESSION_LEGACY, - EROFS_INODE_FLAT_INLINE, - EROFS_INODE_FLAT_COMPRESSION, - EROFS_INODE_LAYOUT_MAX -}; - -static inline bool erofs_inode_is_data_compressed(unsigned int datamode) -{ - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return true; - return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; -} - -/* bit definitions of inode i_advise */ -#define EROFS_I_VERSION_BITS 1 -#define EROFS_I_DATA_MAPPING_BITS 3 - -#define EROFS_I_VERSION_BIT 0 -#define EROFS_I_DATA_MAPPING_BIT 1 - -struct erofs_inode_v1 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_nlink; -/* 8 */__le32 i_size; -/* 12 */__le32 i_reserved; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; -/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ -/* 24 */__le16 i_uid; -/* 26 */__le16 i_gid; -/* 28 */__le32 i_reserved2; -} __packed; - -/* 32 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V1 0 -/* 64 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V2 1 - -struct erofs_inode_v2 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_reserved; -/* 8 */__le64 i_size; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; - - /* only used for 32-bit stat compatibility */ -/* 20 */__le32 i_ino; - -/* 24 */__le32 i_uid; -/* 28 */__le32 i_gid; -/* 32 */__le64 i_ctime; -/* 40 */__le32 i_ctime_nsec; -/* 44 */__le32 i_nlink; -/* 48 */__u8 i_reserved2[16]; -} __packed; /* 64 bytes */ - -#define EROFS_MAX_SHARED_XATTRS (128) -/* h_shared_count between 129 ... 255 are special # */ -#define EROFS_SHARED_XATTR_EXTENT (255) - -/* - * inline xattrs (n == i_xattr_icount): - * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes - * 12 bytes / \ - * / \ - * /-----------------------\ - * | erofs_xattr_entries+ | - * +-----------------------+ - * inline xattrs must starts in erofs_xattr_ibody_header, - * for read-only fs, no need to introduce h_refcount - */ -struct erofs_xattr_ibody_header { - __le32 h_reserved; - __u8 h_shared_count; - __u8 h_reserved2[7]; - __le32 h_shared_xattrs[0]; /* shared xattr id array */ -} __packed; - -/* Name indexes */ -#define EROFS_XATTR_INDEX_USER 1 -#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 -#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 -#define EROFS_XATTR_INDEX_TRUSTED 4 -#define EROFS_XATTR_INDEX_LUSTRE 5 -#define EROFS_XATTR_INDEX_SECURITY 6 - -/* xattr entry (for both inline & shared xattrs) */ -struct erofs_xattr_entry { - __u8 e_name_len; /* length of name */ - __u8 e_name_index; /* attribute name index */ - __le16 e_value_size; /* size of attribute value */ - /* followed by e_name and e_value */ - char e_name[0]; /* attribute name */ -} __packed; - -#define ondisk_xattr_ibody_size(count) ({\ - u32 __count = le16_to_cpu(count); \ - ((__count) == 0) ? 
0 : \ - sizeof(struct erofs_xattr_ibody_header) + \ - sizeof(__u32) * ((__count) - 1); }) - -#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) -#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ - sizeof(struct erofs_xattr_entry) + \ - (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) - -/* available compression algorithm types */ -enum { - Z_EROFS_COMPRESSION_LZ4, - Z_EROFS_COMPRESSION_MAX -}; - -/* - * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) - * e.g. for 4k logical cluster size, 4B if compacted 2B is off; - * (4B) + 2B + (4B) if compacted 2B is on. - */ -#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 - -#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) - -struct z_erofs_map_header { - __le32 h_reserved1; - __le16 h_advise; - /* - * bit 0-3 : algorithm type of head 1 (logical cluster type 01); - * bit 4-7 : algorithm type of head 2 (logical cluster type 11). - */ - __u8 h_algorithmtype; - /* - * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; - * bit 3-4 : (physical - logical) cluster bits of head 1: - * For example, if logical clustersize = 4096, 1 for 8192. - * bit 5-7 : (physical - logical) cluster bits of head 2. - */ - __u8 h_clusterbits; -}; - -#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 - -/* - * Z_EROFS Variable-sized Logical Extent cluster type: - * 0 - literal (uncompressed) cluster - * 1 - compressed cluster (for the head logical cluster) - * 2 - compressed cluster (for the other logical clusters) - * - * In detail, - * 0 - literal (uncompressed) cluster, - * di_advise = 0 - * di_clusterofs = the literal data offset of the cluster - * di_blkaddr = the blkaddr of the literal cluster - * - * 1 - compressed cluster (for the head logical cluster) - * di_advise = 1 - * di_clusterofs = the decompressed data offset of the cluster - * di_blkaddr = the blkaddr of the compressed cluster - * - * 2 - compressed cluster (for the other logical clusters) - * di_advise = 2 - * di_clusterofs = - * the decompressed data offset in its own head cluster - * di_u.delta[0] = distance to its corresponding head cluster - * di_u.delta[1] = distance to its corresponding tail cluster - * (di_advise could be 0, 1 or 2) - */ -enum { - Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, - Z_EROFS_VLE_CLUSTER_TYPE_HEAD, - Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, - Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, - Z_EROFS_VLE_CLUSTER_TYPE_MAX -}; - -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 - -struct z_erofs_vle_decompressed_index { - __le16 di_advise; - /* where to decompress in the head cluster */ - __le16 di_clusterofs; - - union { - /* for the head cluster */ - __le32 blkaddr; - /* - * for the rest clusters - * eg. 
for 4k page-sized cluster, maximum 4K*64k = 256M) - * [0] - pointing to the head cluster - * [1] - pointing to the tail cluster - */ - __le16 delta[2]; - } di_u __packed; /* 8 bytes */ -} __packed; - -#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ - (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ - sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) - -/* dirent sorts in alphabet order, thus we can do binary search */ -struct erofs_dirent { - __le64 nid; /* 0, node number */ - __le16 nameoff; /* 8, start offset of file name */ - __u8 file_type; /* 10, file type */ - __u8 reserved; /* 11, reserved */ -} __packed; - -/* - * EROFS file types should match generic FT_* types and - * it seems no need to add BUILD_BUG_ONs since potential - * unmatchness will break other fses as well... - */ - -#define EROFS_NAME_LEN 255 - -/* check the EROFS on-disk layout strictly at compile time */ -static inline void erofs_check_ondisk_layout_definitions(void) -{ - BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); - BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32); - BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64); - BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); - BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); - BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); - BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); - BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); - - BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < - Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); -} - -#endif - diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h deleted file mode 100644 index bfb2da9c4eee..000000000000 --- a/drivers/staging/erofs/include/trace/events/erofs.h +++ /dev/null @@ -1,256 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM erofs - -#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_EROFS_H - -#include - -#define show_dev(dev) MAJOR(dev), MINOR(dev) -#define show_dev_nid(entry) show_dev(entry->dev), entry->nid - -#define show_file_type(type) \ - __print_symbolic(type, \ - { 0, "FILE" }, \ - { 1, "DIR" }) - -#define show_map_flags(flags) __print_flags(flags, "|", \ - { EROFS_GET_BLOCKS_RAW, "RAW" }) - -#define show_mflags(flags) __print_flags(flags, "", \ - { EROFS_MAP_MAPPED, "M" }, \ - { EROFS_MAP_META, "I" }, \ - { EROFS_MAP_ZIPPED, "Z" }) - -TRACE_EVENT(erofs_lookup, - - TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), - - TP_ARGS(dir, dentry, flags), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(const char *, name ) - __field(unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = dir->i_sb->s_dev; - __entry->nid = EROFS_V(dir)->nid; - __entry->name = dentry->d_name.name; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", - show_dev_nid(__entry), - __entry->name, - __entry->flags) -); - -TRACE_EVENT(erofs_fill_inode, - TP_PROTO(struct inode *inode, int isdir), - TP_ARGS(inode, isdir), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(erofs_blk_t, blkaddr ) - __field(unsigned int, ofs ) - __field(int, isdir ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); - __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); - 
__entry->isdir = isdir; - ), - - TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", - show_dev_nid(__entry), - __entry->blkaddr, __entry->ofs, - __entry->isdir) -); - -TRACE_EVENT(erofs_readpage, - - TP_PROTO(struct page *page, bool raw), - - TP_ARGS(page, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(int, dir ) - __field(pgoff_t, index ) - __field(int, uptodate) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->nid = EROFS_V(page->mapping->host)->nid; - __entry->dir = S_ISDIR(page->mapping->host->i_mode); - __entry->index = page->index; - __entry->uptodate = PageUptodate(page); - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " - "raw = %d", - show_dev_nid(__entry), - show_file_type(__entry->dir), - (unsigned long)__entry->index, - __entry->uptodate, - __entry->raw) -); - -TRACE_EVENT(erofs_readpages, - - TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, - bool raw), - - TP_ARGS(inode, page, nrpage, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(pgoff_t, start ) - __field(unsigned int, nrpage ) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->start = page->index; - __entry->nrpage = nrpage; - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", - show_dev_nid(__entry), - (unsigned long)__entry->start, - __entry->nrpage, - __entry->raw) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( erofs_off_t, la ) - __field( u64, llen ) - __field( unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->la = map->m_la; - __entry->llen = map->m_llen; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", - show_dev_nid(__entry), - __entry->la, __entry->llen, - __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") -); - -DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags), - - TP_ARGS(inode, map, flags) -); - -DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( unsigned int, flags ) - __field( erofs_off_t, la ) - __field( erofs_off_t, pa ) - __field( u64, llen ) - __field( u64, plen ) - __field( unsigned int, mflags ) - __field( int, ret ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->flags = flags; - __entry->la = map->m_la; - __entry->pa = map->m_pa; - __entry->llen = map->m_llen; - __entry->plen = map->m_plen; - __entry->mflags = map->m_flags; - __entry->ret = ret; - ), - - TP_printk("dev = (%d,%d), nid = %llu, flags %s " - "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", - show_dev_nid(__entry), - __entry->flags ? show_map_flags(__entry->flags) : "NULL", - __entry->la, __entry->pa, __entry->llen, __entry->plen, - show_mflags(__entry->mflags), __entry->ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -TRACE_EVENT(erofs_destroy_inode, - TP_PROTO(struct inode *inode), - - TP_ARGS(inode), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - ), - - TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) -); - -#endif /* _TRACE_EROFS_H */ - - /* This part must be outside protection */ -#include diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c deleted file mode 100644 index cbc2c342a37f..000000000000 --- a/drivers/staging/erofs/inode.c +++ /dev/null @@ -1,334 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/inode.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
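One note on the trace header that just ended: DECLARE_EVENT_CLASS plus DEFINE_EVENT is shared-template reuse, so the enter/exit event pairs above get identical layouts without repeating the body. A plain-C analogue of stamping several named events out of one template, with printf standing in for the tracing backend (illustrative only):

#include <stdio.h>

#define DEFINE_MAP_EVENT(name)						\
static void trace_##name(unsigned long long la, unsigned long long llen) \
{									\
	printf(#name ": la %llu llen %llu\n", la, llen);		\
}

DEFINE_MAP_EVENT(map_blocks_flatmode_enter)
DEFINE_MAP_EVENT(z_map_blocks_iter_enter)

int main(void)
{
	trace_map_blocks_flatmode_enter(0, 4096);
	trace_z_map_blocks_iter_enter(4096, 8192);
	return 0;
}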
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -/* no locking */ -static int read_inode(struct inode *inode, void *data) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_inode_v1 *v1 = data; - const unsigned int advise = le16_to_cpu(v1->i_advise); - erofs_blk_t nblks = 0; - - vi->datamode = __inode_data_mapping(advise); - - if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { - errln("unsupported data mapping %u of nid %llu", - vi->datamode, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { - struct erofs_inode_v2 *v2 = data; - - vi->inode_isize = sizeof(struct erofs_inode_v2); - vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v2->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v2->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le32_to_cpu(v2->i_uid)); - i_gid_write(inode, le32_to_cpu(v2->i_gid)); - set_nlink(inode, le32_to_cpu(v2->i_nlink)); - - /* ns timestamp */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - le64_to_cpu(v2->i_ctime); - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - le32_to_cpu(v2->i_ctime_nsec); - - inode->i_size = le64_to_cpu(v2->i_size); - - /* total blocks for compressed files */ - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v2->i_u.compressed_blocks); - } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - vi->inode_isize = sizeof(struct erofs_inode_v1); - vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v1->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v1->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le16_to_cpu(v1->i_uid)); - i_gid_write(inode, le16_to_cpu(v1->i_gid)); - set_nlink(inode, le16_to_cpu(v1->i_nlink)); - - /* use build time to derive all file time */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - sbi->build_time; - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - sbi->build_time_nsec; - - inode->i_size = le32_to_cpu(v1->i_size); - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v1->i_u.compressed_blocks); - } else { - errln("unsupported on-disk inode version %u of nid %llu", - __inode_version(advise), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (!nblks) - /* measure inode.i_blocks as generic filesystems */ - inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; - else - inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; - -bogusimode: - errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; -} - -/* - * try_lock can be required since locking order is: - * file data(fs_inode) - * meta(bd_inode) - * but the majority of the callers is "iget", - * in that case we are pretty sure no deadlock since - * no data operations exist. 
However I tend to - * try_lock since it takes no much overhead and - * will success immediately. - */ -static int fill_inline_data(struct inode *inode, void *data, - unsigned int m_pofs) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_sb_info *sbi = EROFS_I_SB(inode); - - /* should be inode inline C */ - if (!is_inode_flat_inline(inode)) - return 0; - - /* fast symlink (following ext4) */ - if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { - char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); - - if (unlikely(!lnk)) - return -ENOMEM; - - m_pofs += vi->inode_isize + vi->xattr_isize; - - /* inline symlink data shouldn't across page boundary as well */ - if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { - kfree(lnk); - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* get in-page inline data */ - memcpy(lnk, data + m_pofs, inode->i_size); - lnk[inode->i_size] = '\0'; - - inode->i_link = lnk; - set_inode_fast_symlink(inode); - } - return 0; -} - -static int fill_inode(struct inode *inode, int isdir) -{ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - struct erofs_vnode *vi = EROFS_V(inode); - struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; - unsigned int ofs; - erofs_off_t inode_loc; - - trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(sbi, vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - - debugln("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - - page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); - - if (IS_ERR(page)) { - errln("failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); - return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = read_inode(inode, data + ofs); - if (!err) { - /* setup the new inode */ - if (S_ISREG(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - inode->i_fop = &generic_ro_fops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &erofs_dir_iops; - inode->i_fop = &erofs_dir_fops; - } else if (S_ISLNK(inode->i_mode)) { - /* by default, page_get_link is used for symlink */ - inode->i_op = &erofs_symlink_iops; - inode_nohighmem(inode); - } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || - S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - init_special_inode(inode, inode->i_mode, inode->i_rdev); - goto out_unlock; - } else { - err = -EFSCORRUPTED; - goto out_unlock; - } - - if (is_inode_layout_compression(inode)) { - err = z_erofs_fill_inode(inode); - goto out_unlock; - } - - inode->i_mapping->a_ops = &erofs_raw_access_aops; - - /* fill last page if inline data is available */ - err = fill_inline_data(inode, data, ofs); - } - -out_unlock: - unlock_page(page); - put_page(page); - return err; -} - -/* - * erofs nid is 64bits, but i_ino is 'unsigned long', therefore - * we should do more for 32-bit platform to find the right inode. 
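erofs_inode_hash() in internal.h implements exactly this fold, and the test actor just below re-checks the full nid because the fold can collide on 32-bit. A userspace illustration (the two sample nids are arbitrary; on an LP64 host the hashes stay distinct):

#include <stdint.h>
#include <stdio.h>

/* fold a 64-bit nid into an unsigned long, as the 32-bit path must */
static unsigned long fold_nid(uint64_t nid)
{
	if (sizeof(unsigned long) == 4)
		return (unsigned long)((nid >> 32) ^ (nid & 0xffffffff));
	return (unsigned long)nid;
}

int main(void)
{
	/* two distinct nids that collide after folding on a 32-bit host */
	uint64_t a = 0x0000000100000000ull, b = 0x1ull;

	printf("hash(a)=%lx hash(b)=%lx equal=%d\n",
	       fold_nid(a), fold_nid(b), fold_nid(a) == fold_nid(b));
	return 0;
}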
- */ -#if BITS_PER_LONG == 32 -static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - return EROFS_V(inode)->nid == nid; -} - -static int erofs_iget_set_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - inode->i_ino = erofs_inode_hash(nid); - return 0; -} -#endif - -static inline struct inode *erofs_iget_locked(struct super_block *sb, - erofs_nid_t nid) -{ - const unsigned long hashval = erofs_inode_hash(nid); - -#if BITS_PER_LONG >= 64 - /* it is safe to use iget_locked for >= 64-bit platform */ - return iget_locked(sb, hashval); -#else - return iget5_locked(sb, hashval, erofs_ilookup_test_actor, - erofs_iget_set_actor, &nid); -#endif -} - -struct inode *erofs_iget(struct super_block *sb, - erofs_nid_t nid, - bool isdir) -{ - struct inode *inode = erofs_iget_locked(sb, nid); - - if (unlikely(!inode)) - return ERR_PTR(-ENOMEM); - - if (inode->i_state & I_NEW) { - int err; - struct erofs_vnode *vi = EROFS_V(inode); - - vi->nid = nid; - - err = fill_inode(inode, isdir); - if (likely(!err)) - unlock_new_inode(inode); - else { - iget_failed(inode); - inode = ERR_PTR(err); - } - } - return inode; -} - -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags) -{ - struct inode *const inode = d_inode(path->dentry); - - if (is_inode_layout_compression(inode)) - stat->attributes |= STATX_ATTR_COMPRESSED; - - stat->attributes |= STATX_ATTR_IMMUTABLE; - stat->attributes_mask |= (STATX_ATTR_COMPRESSED | - STATX_ATTR_IMMUTABLE); - - generic_fillattr(inode, stat); - return 0; -} - -const struct inode_operations erofs_generic_iops = { - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_symlink_iops = { - .get_link = page_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_fast_symlink_iops = { - .get_link = simple_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h deleted file mode 100644 index 0e8d58546c52..000000000000 --- a/drivers/staging/erofs/internal.h +++ /dev/null @@ -1,554 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/internal.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_INTERNAL_H -#define __EROFS_INTERNAL_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include "erofs_fs.h" - -/* redefine pr_fmt "erofs: " */ -#undef pr_fmt -#define pr_fmt(fmt) "erofs: " fmt - -#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__) -#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__) -#ifdef CONFIG_EROFS_FS_DEBUG -#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__) -#define DBG_BUGON BUG_ON -#else -#define debugln(x, ...) 
((void)0) -#define DBG_BUGON(x) ((void)(x)) -#endif /* !CONFIG_EROFS_FS_DEBUG */ - -enum { - FAULT_KMALLOC, - FAULT_READ_IO, - FAULT_MAX, -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -extern const char *erofs_fault_name[FAULT_MAX]; -#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) - -struct erofs_fault_info { - atomic_t inject_ops; - unsigned int inject_rate; - unsigned int inject_type; -}; -#endif /* CONFIG_EROFS_FAULT_INJECTION */ - -/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ -#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 - -typedef u64 erofs_nid_t; -typedef u64 erofs_off_t; -/* data type for filesystem-wide blocks number */ -typedef u32 erofs_blk_t; - -struct erofs_sb_info { -#ifdef CONFIG_EROFS_FS_ZIP - /* list for all registered superblocks, mainly for shrinker */ - struct list_head list; - struct mutex umount_mutex; - - /* the dedicated workstation for compression */ - struct radix_tree_root workstn_tree; - - /* threshold for decompression synchronously */ - unsigned int max_sync_decompress_pages; - - unsigned int shrinker_run_no; - - /* current strategy of how to use managed cache */ - unsigned char cache_strategy; - - /* pseudo inode to manage cached pages */ - struct inode *managed_cache; -#endif /* CONFIG_EROFS_FS_ZIP */ - u32 blocks; - u32 meta_blkaddr; -#ifdef CONFIG_EROFS_FS_XATTR - u32 xattr_blkaddr; -#endif - - /* inode slot unit size in bit shift */ - unsigned char islotbits; - - u32 build_time_nsec; - u64 build_time; - - /* what we really care is nid, rather than ino.. */ - erofs_nid_t root_nid; - /* used for statfs, f_files - f_favail */ - u64 inos; - - u8 uuid[16]; /* 128-bit uuid for volume */ - u8 volume_name[16]; /* volume name */ - u32 requirements; - - unsigned int mount_opt; - -#ifdef CONFIG_EROFS_FAULT_INJECTION - struct erofs_fault_info fault_info; /* For fault injection */ -#endif -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -#define erofs_show_injection_info(type) \ - infoln("inject %s in %s of %pS", erofs_fault_name[type], \ - __func__, __builtin_return_address(0)) - -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (!ffi->inject_rate) - return false; - - if (!IS_FAULT_SET(ffi, type)) - return false; - - atomic_inc(&ffi->inject_ops); - if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { - atomic_set(&ffi->inject_ops, 0); - return true; - } - return false; -} -#else -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - return false; -} - -static inline void erofs_show_injection_info(int type) -{ -} -#endif /* !CONFIG_EROFS_FAULT_INJECTION */ - -static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, - size_t size, gfp_t flags) -{ - if (time_to_inject(sbi, FAULT_KMALLOC)) { - erofs_show_injection_info(FAULT_KMALLOC); - return NULL; - } - return kmalloc(size, flags); -} - -#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) -#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) - -/* Mount flags set via mount options or defaults */ -#define EROFS_MOUNT_XATTR_USER 0x00000010 -#define EROFS_MOUNT_POSIX_ACL 0x00000020 -#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 - -#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) -#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) -#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) - -#ifdef CONFIG_EROFS_FS_ZIP -enum { - EROFS_ZIP_CACHE_DISABLED, - EROFS_ZIP_CACHE_READAHEAD, - 
EROFS_ZIP_CACHE_READAROUND -}; - -#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) - -/* basic unit of the workstation of a super_block */ -struct erofs_workgroup { - /* the workgroup index in the workstation */ - pgoff_t index; - - /* overall workgroup reference count */ - atomic_t refcount; -}; - -#if defined(CONFIG_SMP) -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - /* - * other observers should notice all modifications - * in the freezing period. - */ - smp_mb(); - atomic_set(&grp->refcount, orig_val); - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - return atomic_cond_read_relaxed(&grp->refcount, - VAL != EROFS_LOCKED_MAGIC); -} -#else -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - /* no need to spin on UP platforms, let's just disable preemption. */ - if (val != atomic_read(&grp->refcount)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - int v = atomic_read(&grp->refcount); - - /* workgroup is never freezed on uniprocessor systems */ - DBG_BUGON(v == EROFS_LOCKED_MAGIC); - return v; -} -#endif /* !CONFIG_SMP */ - -/* hard limit of pages per compressed cluster */ -#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT) -#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES -#else -#define EROFS_PCPUBUF_NR_PAGES 0 -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* we strictly follow PAGE_SIZE and no buffer head yet */ -#define LOG_BLOCK_SIZE PAGE_SHIFT - -#undef LOG_SECTORS_PER_BLOCK -#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) - -#undef SECTORS_PER_BLOCK -#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK) - -#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE) - -#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ) -#error erofs cannot be used in this platform -#endif - -#define EROFS_IO_MAX_RETRIES_NOFAIL 5 - -#define ROOT_NID(sb) ((sb)->root_nid) - -#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ) -#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ) -#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ) - -static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) -{ - return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); -} - -/* atomic flag definitions */ -#define EROFS_V_EA_INITED_BIT 0 -#define EROFS_V_Z_INITED_BIT 1 - -/* bitlock definitions (arranged in reverse order) */ -#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) -#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2) - -struct erofs_vnode { - erofs_nid_t nid; - - /* atomic flags (including bitlocks) */ - unsigned long flags; - - unsigned char datamode; - unsigned char inode_isize; - unsigned short xattr_isize; - - unsigned int xattr_shared_count; - unsigned int *xattr_shared_xattrs; - - union { - erofs_blk_t raw_blkaddr; -#ifdef CONFIG_EROFS_FS_ZIP - struct { - unsigned short z_advise; - unsigned char z_algorithmtype[2]; - unsigned char z_logical_clusterbits; - unsigned char z_physical_clusterbits[2]; - }; -#endif /* CONFIG_EROFS_FS_ZIP */ - }; - /* the corresponding vfs 
inode */ - struct inode vfs_inode; -}; - -#define EROFS_V(ptr) \ - container_of(ptr, struct erofs_vnode, vfs_inode) - -#define __inode_advise(x, bit, bits) \ - (((x) >> (bit)) & ((1 << (bits)) - 1)) - -#define __inode_version(advise) \ - __inode_advise(advise, EROFS_I_VERSION_BIT, \ - EROFS_I_VERSION_BITS) - -#define __inode_data_mapping(advise) \ - __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ - EROFS_I_DATA_MAPPING_BITS) - -static inline unsigned long inode_datablocks(struct inode *inode) -{ - /* since i_size cannot be changed */ - return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); -} - -static inline bool is_inode_layout_compression(struct inode *inode) -{ - return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); -} - -static inline bool is_inode_flat_inline(struct inode *inode) -{ - return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; -} - -extern const struct super_operations erofs_sops; - -extern const struct address_space_operations erofs_raw_access_aops; -#ifdef CONFIG_EROFS_FS_ZIP -extern const struct address_space_operations z_erofs_vle_normalaccess_aops; -#endif - -/* - * Logical to physical block mapping, used by erofs_map_blocks() - * - * Different with other file systems, it is used for 2 access modes: - * - * 1) RAW access mode: - * - * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, - * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). - * - * Note that m_lblk in the RAW access mode refers to the number of - * the compressed ondisk block rather than the uncompressed - * in-memory block for the compressed file. - * - * m_pofs equals to m_lofs except for the inline data page. - * - * 2) Normal access mode: - * - * If the inode is not compressed, it has no difference with - * the RAW access mode. However, if the inode is compressed, - * users should pass a valid (m_lblk, m_lofs) pair, and get - * the needed m_pblk, m_pofs, m_len to get the compressed data - * and the updated m_lblk, m_lofs which indicates the start - * of the corresponding uncompressed data in the file. - */ -enum { - BH_Zipped = BH_PrivateStart, - BH_FullMapped, -}; - -/* Has a disk mapping */ -#define EROFS_MAP_MAPPED (1 << BH_Mapped) -/* Located in metadata (could be copied from bd_inode) */ -#define EROFS_MAP_META (1 << BH_Meta) -/* The extent has been compressed */ -#define EROFS_MAP_ZIPPED (1 << BH_Zipped) -/* The length of extent is full */ -#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) - -struct erofs_map_blocks { - erofs_off_t m_pa, m_la; - u64 m_plen, m_llen; - - unsigned int m_flags; - - struct page *mpage; -}; - -/* Flags used by erofs_map_blocks() */ -#define EROFS_GET_BLOCKS_RAW 0x0001 - -/* zmap.c */ -#ifdef CONFIG_EROFS_FS_ZIP -int z_erofs_fill_inode(struct inode *inode); -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags); -#else -static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } -static inline int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* data.c */ -static inline struct bio *erofs_grab_bio(struct super_block *sb, - erofs_blk_t blkaddr, - unsigned int nr_pages, - void *bi_private, bio_end_io_t endio, - bool nofail) -{ - const gfp_t gfp = GFP_NOIO; - struct bio *bio; - - do { - if (nr_pages == 1) { - bio = bio_alloc(gfp | (nofail ? 
__GFP_NOFAIL : 0), 1); - if (unlikely(!bio)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - break; - } - bio = bio_alloc(gfp, nr_pages); - nr_pages /= 2; - } while (unlikely(!bio)); - - bio->bi_end_io = endio; - bio_set_dev(bio, sb->s_bdev); - bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; - bio->bi_private = bi_private; - return bio; -} - -static inline void __submit_bio(struct bio *bio, unsigned int op, - unsigned int op_flags) -{ - bio_set_op_attrs(bio, op, op_flags); - submit_bio(bio); -} - -struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, - bool prio, bool nofail); - -static inline struct page *erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio) -{ - return __erofs_get_meta_page(sb, blkaddr, prio, false); -} - -int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); - -static inline struct page *erofs_get_inline_page(struct inode *inode, - erofs_blk_t blkaddr) -{ - return erofs_get_meta_page(inode->i_sb, blkaddr, - S_ISDIR(inode->i_mode)); -} - -/* inode.c */ -static inline unsigned long erofs_inode_hash(erofs_nid_t nid) -{ -#if BITS_PER_LONG == 32 - return (nid >> 32) ^ (nid & 0xffffffff); -#else - return nid; -#endif -} - -extern const struct inode_operations erofs_generic_iops; -extern const struct inode_operations erofs_symlink_iops; -extern const struct inode_operations erofs_fast_symlink_iops; - -static inline void set_inode_fast_symlink(struct inode *inode) -{ - inode->i_op = &erofs_fast_symlink_iops; -} - -static inline bool is_inode_fast_symlink(struct inode *inode) -{ - return inode->i_op == &erofs_fast_symlink_iops; -} - -struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags); - -/* namei.c */ -extern const struct inode_operations erofs_dir_iops; - -int erofs_namei(struct inode *dir, struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type); - -/* dir.c */ -extern const struct file_operations erofs_dir_fops; - -/* utils.c / zdata.c */ -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -void *erofs_get_pcpubuf(unsigned int pagenr); -#define erofs_put_pcpubuf(buf) do { \ - (void)&(buf); \ - preempt_enable(); \ -} while (0) -#else -static inline void *erofs_get_pcpubuf(unsigned int pagenr) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -#define erofs_put_pcpubuf(buf) do {} while (0) -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -int erofs_workgroup_put(struct erofs_workgroup *grp); -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag); -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, bool tag); -void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); -void erofs_shrinker_register(struct super_block *sb); -void erofs_shrinker_unregister(struct super_block *sb); -int __init erofs_init_shrinker(void); -void erofs_exit_shrinker(void); -int __init z_erofs_init_zip_subsystem(void); -void z_erofs_exit_zip_subsystem(void); -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); -int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page); -#else -static inline void erofs_shrinker_register(struct super_block *sb) {} -static inline void erofs_shrinker_unregister(struct super_block *sb) {} -static inline int erofs_init_shrinker(void) { return 0; } -static 
inline void erofs_exit_shrinker(void) {} -static inline int z_erofs_init_zip_subsystem(void) { return 0; } -static inline void z_erofs_exit_zip_subsystem(void) {} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ - -#endif /* __EROFS_INTERNAL_H */ - diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c deleted file mode 100644 index 8334a910acef..000000000000 --- a/drivers/staging/erofs/namei.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/namei.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -struct erofs_qstr { - const unsigned char *name; - const unsigned char *end; -}; - -/* based on the end of qn is accurate and it must have the trailing '\0' */ -static inline int dirnamecmp(const struct erofs_qstr *qn, - const struct erofs_qstr *qd, - unsigned int *matched) -{ - unsigned int i = *matched; - - /* - * on-disk error, let's only BUG_ON in the debugging mode. - * otherwise, it will return 1 to just skip the invalid name - * and go on (in consideration of the lookup performance). - */ - DBG_BUGON(qd->name > qd->end); - - /* qd could not have trailing '\0' */ - /* However it is absolutely safe if < qd->end */ - while (qd->name + i < qd->end && qd->name[i] != '\0') { - if (qn->name[i] != qd->name[i]) { - *matched = i; - return qn->name[i] > qd->name[i] ? 1 : -1; - } - ++i; - } - *matched = i; - /* See comments in __d_alloc on the terminating NUL character */ - return qn->name[i] == '\0' ? 0 : 1; -} - -#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) - -static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, - u8 *data, - unsigned int dirblksize, - const int ndirents) -{ - int head, back; - unsigned int startprfx, endprfx; - struct erofs_dirent *const de = (struct erofs_dirent *)data; - - /* since the 1st dirent has been evaluated previously */ - head = 1; - back = ndirents - 1; - startprfx = endprfx = 0; - - while (head <= back) { - const int mid = head + (back - head) / 2; - const int nameoff = nameoff_from_disk(de[mid].nameoff, - dirblksize); - unsigned int matched = min(startprfx, endprfx); - struct erofs_qstr dname = { - .name = data + nameoff, - .end = unlikely(mid >= ndirents - 1) ? 
- data + dirblksize : - data + nameoff_from_disk(de[mid + 1].nameoff, - dirblksize) - }; - - /* string comparison without already matched prefix */ - int ret = dirnamecmp(name, &dname, &matched); - - if (unlikely(!ret)) { - return de + mid; - } else if (ret > 0) { - head = mid + 1; - startprfx = matched; - } else { - back = mid - 1; - endprfx = matched; - } - } - - return ERR_PTR(-ENOENT); -} - -static struct page *find_target_block_classic(struct inode *dir, - struct erofs_qstr *name, - int *_ndirents) -{ - unsigned int startprfx, endprfx; - int head, back; - struct address_space *const mapping = dir->i_mapping; - struct page *candidate = ERR_PTR(-ENOENT); - - startprfx = endprfx = 0; - head = 0; - back = inode_datablocks(dir) - 1; - - while (head <= back) { - const int mid = head + (back - head) / 2; - struct page *page = read_mapping_page(mapping, mid, NULL); - - if (!IS_ERR(page)) { - struct erofs_dirent *de = kmap_atomic(page); - const int nameoff = nameoff_from_disk(de->nameoff, - EROFS_BLKSIZ); - const int ndirents = nameoff / sizeof(*de); - int diff; - unsigned int matched; - struct erofs_qstr dname; - - if (unlikely(!ndirents)) { - kunmap_atomic(de); - put_page(page); - errln("corrupted dir block %d @ nid %llu", - mid, EROFS_V(dir)->nid); - DBG_BUGON(1); - page = ERR_PTR(-EFSCORRUPTED); - goto out; - } - - matched = min(startprfx, endprfx); - - dname.name = (u8 *)de + nameoff; - if (ndirents == 1) - dname.end = (u8 *)de + EROFS_BLKSIZ; - else - dname.end = (u8 *)de + - nameoff_from_disk(de[1].nameoff, - EROFS_BLKSIZ); - - /* string comparison without already matched prefix */ - diff = dirnamecmp(name, &dname, &matched); - kunmap_atomic(de); - - if (unlikely(!diff)) { - *_ndirents = 0; - goto out; - } else if (diff > 0) { - head = mid + 1; - startprfx = matched; - - if (!IS_ERR(candidate)) - put_page(candidate); - candidate = page; - *_ndirents = ndirents; - } else { - put_page(page); - - back = mid - 1; - endprfx = matched; - } - continue; - } -out: /* free if the candidate is valid */ - if (!IS_ERR(candidate)) - put_page(candidate); - return page; - } - return candidate; -} - -int erofs_namei(struct inode *dir, - struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type) -{ - int ndirents; - struct page *page; - void *data; - struct erofs_dirent *de; - struct erofs_qstr qn; - - if (unlikely(!dir->i_size)) - return -ENOENT; - - qn.name = name->name; - qn.end = name->name + name->len; - - ndirents = 0; - page = find_target_block_classic(dir, &qn, &ndirents); - - if (IS_ERR(page)) - return PTR_ERR(page); - - data = kmap_atomic(page); - /* the target page has been mapped */ - if (ndirents) - de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); - else - de = (struct erofs_dirent *)data; - - if (!IS_ERR(de)) { - *nid = le64_to_cpu(de->nid); - *d_type = de->file_type; - } - - kunmap_atomic(data); - put_page(page); - - return PTR_ERR_OR_ZERO(de); -} - -/* NOTE: i_mutex is already held by vfs */ -static struct dentry *erofs_lookup(struct inode *dir, - struct dentry *dentry, - unsigned int flags) -{ - int err; - erofs_nid_t nid; - unsigned int d_type; - struct inode *inode; - - DBG_BUGON(!d_really_is_negative(dentry)); - /* dentry must be unhashed in lookup, no need to worry about */ - DBG_BUGON(!d_unhashed(dentry)); - - trace_erofs_lookup(dir, dentry, flags); - - /* file name exceeds fs limit */ - if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) - return ERR_PTR(-ENAMETOOLONG); - - /* false uninitialized warnings on gcc 4.8.x */ - err = erofs_namei(dir, &dentry->d_name, &nid, 
&d_type); - - if (err == -ENOENT) { - /* negative dentry */ - inode = NULL; - } else if (unlikely(err)) { - inode = ERR_PTR(err); - } else { - debugln("%s, %s (nid %llu) found, d_type %u", __func__, - dentry->d_name.name, nid, d_type); - inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR); - } - return d_splice_alias(inode, dentry); -} - -const struct inode_operations erofs_dir_iops = { - .lookup = erofs_lookup, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c deleted file mode 100644 index 2da471010a86..000000000000 --- a/drivers/staging/erofs/super.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/super.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include -#include -#include -#include -#include "xattr.h" - -#define CREATE_TRACE_POINTS -#include - -static struct kmem_cache *erofs_inode_cachep __read_mostly; - -static void init_once(void *ptr) -{ - struct erofs_vnode *vi = ptr; - - inode_init_once(&vi->vfs_inode); -} - -static int __init erofs_init_inode_cache(void) -{ - erofs_inode_cachep = kmem_cache_create("erofs_inode", - sizeof(struct erofs_vnode), 0, - SLAB_RECLAIM_ACCOUNT, - init_once); - - return erofs_inode_cachep ? 0 : -ENOMEM; -} - -static void erofs_exit_inode_cache(void) -{ - kmem_cache_destroy(erofs_inode_cachep); -} - -static struct inode *alloc_inode(struct super_block *sb) -{ - struct erofs_vnode *vi = - kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); - - if (!vi) - return NULL; - - /* zero out everything except vfs_inode */ - memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode)); - return &vi->vfs_inode; -} - -static void free_inode(struct inode *inode) -{ - struct erofs_vnode *vi = EROFS_V(inode); - - /* be careful RCU symlink path (see ext4_inode_info->i_data)! 
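
alloc_inode() above clears only the bytes up to vfs_inode with a single memset(), which is correct only while vfs_inode remains the last member of struct erofs_vnode; the constructor-initialized vfs_inode is deliberately left untouched. A small sketch of that layout contract, with an illustrative structure:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct demo_vnode {
        unsigned long flags;
        void *xattr_shared_xattrs;
        struct { long opaque[8]; } vfs_inode;   /* must remain last */
};

int main(void)
{
        struct demo_vnode vi;

        /* zero all fs-private state, leave vfs_inode alone */
        memset(&vi, 0, offsetof(struct demo_vnode, vfs_inode));
        assert(vi.flags == 0 && vi.xattr_shared_xattrs == NULL);
        return 0;
}
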
*/ - if (is_inode_fast_symlink(inode)) - kfree(inode->i_link); - - kfree(vi->xattr_shared_xattrs); - - kmem_cache_free(erofs_inode_cachep, vi); -} - -static bool check_layout_compatibility(struct super_block *sb, - struct erofs_super_block *layout) -{ - const unsigned int requirements = le32_to_cpu(layout->requirements); - - EROFS_SB(sb)->requirements = requirements; - - /* check if current kernel meets all mandatory requirements */ - if (requirements & (~EROFS_ALL_REQUIREMENTS)) { - errln("unidentified requirements %x, please upgrade kernel version", - requirements & ~EROFS_ALL_REQUIREMENTS); - return false; - } - return true; -} - -static int superblock_read(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - struct buffer_head *bh; - struct erofs_super_block *layout; - unsigned int blkszbits; - int ret; - - bh = sb_bread(sb, 0); - - if (!bh) { - errln("cannot read erofs superblock"); - return -EIO; - } - - sbi = EROFS_SB(sb); - layout = (struct erofs_super_block *)((u8 *)bh->b_data - + EROFS_SUPER_OFFSET); - - ret = -EINVAL; - if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { - errln("cannot find valid erofs superblock"); - goto out; - } - - blkszbits = layout->blkszbits; - /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ - if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { - errln("blksize %u isn't supported on this platform", - 1 << blkszbits); - goto out; - } - - if (!check_layout_compatibility(sb, layout)) - goto out; - - sbi->blocks = le32_to_cpu(layout->blocks); - sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); -#ifdef CONFIG_EROFS_FS_XATTR - sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); -#endif - sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; - sbi->root_nid = le16_to_cpu(layout->root_nid); - sbi->inos = le64_to_cpu(layout->inos); - - sbi->build_time = le64_to_cpu(layout->build_time); - sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); - - memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); - - ret = strscpy(sbi->volume_name, layout->volume_name, - sizeof(layout->volume_name)); - if (ret < 0) { /* -E2BIG */ - errln("bad volume name without NIL terminator"); - ret = -EFSCORRUPTED; - goto out; - } - ret = 0; -out: - brelse(bh); - return ret; -} - -#ifdef CONFIG_EROFS_FAULT_INJECTION -const char *erofs_fault_name[FAULT_MAX] = { - [FAULT_KMALLOC] = "kmalloc", - [FAULT_READ_IO] = "read IO error", -}; - -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (rate) { - atomic_set(&ffi->inject_ops, 0); - ffi->inject_rate = rate; - ffi->inject_type = (1 << FAULT_MAX) - 1; - } else { - memset(ffi, 0, sizeof(struct erofs_fault_info)); - } - - set_opt(sbi, FAULT_INJECTION); -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - int rate = 0; - - if (args->from && match_int(args, &rate)) - return -EINVAL; - - __erofs_build_fault_attr(sbi, rate); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return sbi->fault_info.inject_rate; -} -#else -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("fault_injection options not supported"); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return 0; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -static int erofs_build_cache_strategy(struct 
erofs_sb_info *sbi, - substring_t *args) -{ - const char *cs = match_strdup(args); - int err = 0; - - if (!cs) { - errln("Not enough memory to store cache strategy"); - return -ENOMEM; - } - - if (!strcmp(cs, "disabled")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; - } else if (!strcmp(cs, "readahead")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; - } else if (!strcmp(cs, "readaround")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - } else { - errln("Unrecognized cache strategy \"%s\"", cs); - err = -EINVAL; - } - kfree(cs); - return err; -} -#else -static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("EROFS compression is disabled, so cache strategy is ignored"); - return 0; -} -#endif - -/* set up default EROFS parameters */ -static void default_options(struct erofs_sb_info *sbi) -{ -#ifdef CONFIG_EROFS_FS_ZIP - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - sbi->max_sync_decompress_pages = 3; -#endif -#ifdef CONFIG_EROFS_FS_XATTR - set_opt(sbi, XATTR_USER); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - set_opt(sbi, POSIX_ACL); -#endif -} - -enum { - Opt_user_xattr, - Opt_nouser_xattr, - Opt_acl, - Opt_noacl, - Opt_fault_injection, - Opt_cache_strategy, - Opt_err -}; - -static match_table_t erofs_tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_fault_injection, "fault_injection=%u"}, - {Opt_cache_strategy, "cache_strategy=%s"}, - {Opt_err, NULL} -}; - -static int parse_options(struct super_block *sb, char *options) -{ - substring_t args[MAX_OPT_ARGS]; - char *p; - int err; - - if (!options) - return 0; - - while ((p = strsep(&options, ","))) { - int token; - - if (!*p) - continue; - - args[0].to = args[0].from = NULL; - token = match_token(p, erofs_tokens, args); - - switch (token) { -#ifdef CONFIG_EROFS_FS_XATTR - case Opt_user_xattr: - set_opt(EROFS_SB(sb), XATTR_USER); - break; - case Opt_nouser_xattr: - clear_opt(EROFS_SB(sb), XATTR_USER); - break; -#else - case Opt_user_xattr: - infoln("user_xattr options not supported"); - break; - case Opt_nouser_xattr: - infoln("nouser_xattr options not supported"); - break; -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - case Opt_acl: - set_opt(EROFS_SB(sb), POSIX_ACL); - break; - case Opt_noacl: - clear_opt(EROFS_SB(sb), POSIX_ACL); - break; -#else - case Opt_acl: - infoln("acl options not supported"); - break; - case Opt_noacl: - infoln("noacl options not supported"); - break; -#endif - case Opt_fault_injection: - err = erofs_build_fault_attr(EROFS_SB(sb), args); - if (err) - return err; - break; - case Opt_cache_strategy: - err = erofs_build_cache_strategy(EROFS_SB(sb), args); - if (err) - return err; - break; - default: - errln("Unrecognized mount option \"%s\" or missing value", p); - return -EINVAL; - } - } - return 0; -} - -#ifdef CONFIG_EROFS_FS_ZIP -static const struct address_space_operations managed_cache_aops; - -static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) -{ - int ret = 1; /* 0 - busy */ - struct address_space *const mapping = page->mapping; - - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(mapping->a_ops != &managed_cache_aops); - - if (PagePrivate(page)) - ret = erofs_try_to_free_cached_page(mapping, page); - - return ret; -} - -static void managed_cache_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) -{ - const unsigned int stop = length + offset; - - DBG_BUGON(!PageLocked(page)); - - /* Check for potential overflow in 
debug mode */ - DBG_BUGON(stop > PAGE_SIZE || stop < length); - - if (offset == 0 && stop == PAGE_SIZE) - while (!managed_cache_releasepage(page, GFP_NOFS)) - cond_resched(); -} - -static const struct address_space_operations managed_cache_aops = { - .releasepage = managed_cache_releasepage, - .invalidatepage = managed_cache_invalidatepage, -}; - -static int erofs_init_managed_cache(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - struct inode *const inode = new_inode(sb); - - if (unlikely(!inode)) - return -ENOMEM; - - set_nlink(inode, 1); - inode->i_size = OFFSET_MAX; - - inode->i_mapping->a_ops = &managed_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, - GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); - sbi->managed_cache = inode; - return 0; -} -#else -static int erofs_init_managed_cache(struct super_block *sb) { return 0; } -#endif - -static int erofs_fill_super(struct super_block *sb, void *data, int silent) -{ - struct inode *inode; - struct erofs_sb_info *sbi; - int err; - - infoln("fill_super, device -> %s", sb->s_id); - infoln("options -> %s", (char *)data); - - sb->s_magic = EROFS_SUPER_MAGIC; - - if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { - errln("failed to set erofs blksize"); - return -EINVAL; - } - - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (unlikely(!sbi)) - return -ENOMEM; - - sb->s_fs_info = sbi; - err = superblock_read(sb); - if (err) - return err; - - sb->s_flags |= SB_RDONLY | SB_NOATIME; - sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_time_gran = 1; - - sb->s_op = &erofs_sops; - -#ifdef CONFIG_EROFS_FS_XATTR - sb->s_xattr = erofs_xattr_handlers; -#endif - /* set erofs default mount options */ - default_options(sbi); - - err = parse_options(sb, data); - if (unlikely(err)) - return err; - - if (!silent) - infoln("root inode @ nid %llu", ROOT_NID(sbi)); - - if (test_opt(sbi, POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - -#ifdef CONFIG_EROFS_FS_ZIP - INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); -#endif - - /* get the root inode */ - inode = erofs_iget(sb, ROOT_NID(sbi), true); - if (IS_ERR(inode)) - return PTR_ERR(inode); - - if (unlikely(!S_ISDIR(inode->i_mode))) { - errln("rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); - iput(inode); - return -EINVAL; - } - - sb->s_root = d_make_root(inode); - if (unlikely(!sb->s_root)) - return -ENOMEM; - - erofs_shrinker_register(sb); - /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ - err = erofs_init_managed_cache(sb); - if (unlikely(err)) - return err; - - if (!silent) - infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); - return 0; -} - -static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); -} - -/* - * could be triggered after deactivate_locked_super() - * is called, thus including umount and failed to initialize. 
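
managed_cache_invalidatepage() above computes stop = offset + length and, in debug builds, asserts that the sum neither wraps around nor exceeds PAGE_SIZE; only a whole-page invalidation then forces the cached compressed pages to be released. The range check in isolation:

#include <assert.h>
#include <stdbool.h>

/* validate a partial-invalidation range against a page of 'psize' bytes;
 * 'stop >= length' rules out unsigned wraparound of offset + length */
static bool whole_page_range(unsigned int offset, unsigned int length,
                             unsigned int psize)
{
        const unsigned int stop = offset + length;

        assert(stop >= length && stop <= psize);
        return offset == 0 && stop == psize;
}

int main(void)
{
        assert(whole_page_range(0, 4096, 4096));
        assert(!whole_page_range(512, 1024, 4096));
        return 0;
}
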
- */ -static void erofs_kill_sb(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - - WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); - infoln("unmounting for %s", sb->s_id); - - kill_block_super(sb); - - sbi = EROFS_SB(sb); - if (!sbi) - return; - kfree(sbi); - sb->s_fs_info = NULL; -} - -/* called when ->s_root is non-NULL */ -static void erofs_put_super(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - DBG_BUGON(!sbi); - - erofs_shrinker_unregister(sb); -#ifdef CONFIG_EROFS_FS_ZIP - iput(sbi->managed_cache); - sbi->managed_cache = NULL; -#endif -} - -static struct file_system_type erofs_fs_type = { - .owner = THIS_MODULE, - .name = "erofs", - .mount = erofs_mount, - .kill_sb = erofs_kill_sb, - .fs_flags = FS_REQUIRES_DEV, -}; -MODULE_ALIAS_FS("erofs"); - -static int __init erofs_module_init(void) -{ - int err; - - erofs_check_ondisk_layout_definitions(); - infoln("initializing erofs " EROFS_VERSION); - - err = erofs_init_inode_cache(); - if (err) - goto icache_err; - - err = erofs_init_shrinker(); - if (err) - goto shrinker_err; - - err = z_erofs_init_zip_subsystem(); - if (err) - goto zip_err; - - err = register_filesystem(&erofs_fs_type); - if (err) - goto fs_err; - - infoln("successfully to initialize erofs"); - return 0; - -fs_err: - z_erofs_exit_zip_subsystem(); -zip_err: - erofs_exit_shrinker(); -shrinker_err: - erofs_exit_inode_cache(); -icache_err: - return err; -} - -static void __exit erofs_module_exit(void) -{ - unregister_filesystem(&erofs_fs_type); - z_erofs_exit_zip_subsystem(); - erofs_exit_shrinker(); - erofs_exit_inode_cache(); - infoln("successfully finalize erofs"); -} - -/* get filesystem statistics */ -static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) -{ - struct super_block *sb = dentry->d_sb; - struct erofs_sb_info *sbi = EROFS_SB(sb); - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); - - buf->f_type = sb->s_magic; - buf->f_bsize = EROFS_BLKSIZ; - buf->f_blocks = sbi->blocks; - buf->f_bfree = buf->f_bavail = 0; - - buf->f_files = ULLONG_MAX; - buf->f_ffree = ULLONG_MAX - sbi->inos; - - buf->f_namelen = EROFS_NAME_LEN; - - buf->f_fsid.val[0] = (u32)id; - buf->f_fsid.val[1] = (u32)(id >> 32); - return 0; -} - -static int erofs_show_options(struct seq_file *seq, struct dentry *root) -{ - struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); - -#ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(sbi, XATTR_USER)) - seq_puts(seq, ",user_xattr"); - else - seq_puts(seq, ",nouser_xattr"); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(sbi, POSIX_ACL)) - seq_puts(seq, ",acl"); - else - seq_puts(seq, ",noacl"); -#endif - if (test_opt(sbi, FAULT_INJECTION)) - seq_printf(seq, ",fault_injection=%u", - erofs_get_fault_rate(sbi)); -#ifdef CONFIG_EROFS_FS_ZIP - if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { - seq_puts(seq, ",cache_strategy=disabled"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { - seq_puts(seq, ",cache_strategy=readahead"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { - seq_puts(seq, ",cache_strategy=readaround"); - } else { - seq_puts(seq, ",cache_strategy=(unknown)"); - DBG_BUGON(1); - } -#endif - return 0; -} - -static int erofs_remount(struct super_block *sb, int *flags, char *data) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - unsigned int org_mnt_opt = sbi->mount_opt; - unsigned int org_inject_rate = erofs_get_fault_rate(sbi); - int err; - - DBG_BUGON(!sb_rdonly(sb)); - err = parse_options(sb, data); - if (err) - goto out; - - if (test_opt(sbi, 
POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - - *flags |= SB_RDONLY; - return 0; -out: - __erofs_build_fault_attr(sbi, org_inject_rate); - sbi->mount_opt = org_mnt_opt; - - return err; -} - -const struct super_operations erofs_sops = { - .put_super = erofs_put_super, - .alloc_inode = alloc_inode, - .free_inode = free_inode, - .statfs = erofs_statfs, - .show_options = erofs_show_options, - .remount_fs = erofs_remount, -}; - -module_init(erofs_module_init); -module_exit(erofs_module_exit); - -MODULE_DESCRIPTION("Enhanced ROM File System"); -MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); -MODULE_LICENSE("GPL"); - diff --git a/drivers/staging/erofs/tagptr.h b/drivers/staging/erofs/tagptr.h deleted file mode 100644 index a72897c86744..000000000000 --- a/drivers/staging/erofs/tagptr.h +++ /dev/null @@ -1,110 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * A tagged pointer implementation - * - * Copyright (C) 2018 Gao Xiang - */ -#ifndef __EROFS_FS_TAGPTR_H -#define __EROFS_FS_TAGPTR_H - -#include -#include - -/* - * the name of tagged pointer types are tagptr{1, 2, 3...}_t - * avoid directly using the internal structs __tagptr{1, 2, 3...} - */ -#define __MAKE_TAGPTR(n) \ -typedef struct __tagptr##n { \ - uintptr_t v; \ -} tagptr##n##_t; - -__MAKE_TAGPTR(1) -__MAKE_TAGPTR(2) -__MAKE_TAGPTR(3) -__MAKE_TAGPTR(4) - -#undef __MAKE_TAGPTR - -extern void __compiletime_error("bad tagptr tags") - __bad_tagptr_tags(void); - -extern void __compiletime_error("bad tagptr type") - __bad_tagptr_type(void); - -/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */ -#define __tagptr_mask_1(ptr, n) \ - __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \ - (1UL << (n)) - 1 : - -#define __tagptr_mask(ptr) (\ - __tagptr_mask_1(ptr, 1) ( \ - __tagptr_mask_1(ptr, 2) ( \ - __tagptr_mask_1(ptr, 3) ( \ - __tagptr_mask_1(ptr, 4) ( \ - __bad_tagptr_type(), 0))))) - -/* generate a tagged pointer from a raw value */ -#define tagptr_init(type, val) \ - ((typeof(type)){ .v = (uintptr_t)(val) }) - -/* - * directly cast a tagged pointer to the native pointer type, which - * could be used for backward compatibility of existing code. 
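
The tagptr{1,2,3,4}_t types this header defines pack 1 to 4 tag bits into a pointer's low bits, which alignment guarantees to be zero, and the mask machinery turns a tag overflow into a compile-time error. A runtime-checked userspace equivalent (names hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG_BITS 1u     /* like tagptr1_t: a single tag bit */
#define TAG_MASK ((uintptr_t)((1u << TAG_BITS) - 1))

typedef struct { uintptr_t v; } tagged1_t;

static tagged1_t tp_fold(void *ptr, uintptr_t tag)
{
        /* malloc() results are suitably aligned, so the low bits are free */
        assert(((uintptr_t)ptr & TAG_MASK) == 0 && tag <= TAG_MASK);
        return (tagged1_t){ .v = (uintptr_t)ptr | tag };
}

static void *tp_unfold_ptr(tagged1_t t)     { return (void *)(t.v & ~TAG_MASK); }
static uintptr_t tp_unfold_tag(tagged1_t t) { return t.v & TAG_MASK; }

int main(void)
{
        void *p = malloc(64);
        tagged1_t t = tp_fold(p, 1);

        assert(tp_unfold_ptr(t) == p && tp_unfold_tag(t) == 1);
        free(p);
        return 0;
}

The kernel header goes further: its __builtin_types_compatible_p() chain derives the mask from the tagptr type itself, so mixing tagptr types is also caught at compile time.
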
- */ -#define tagptr_cast_ptr(tptr) ((void *)(tptr).v) - -/* encode tagged pointers */ -#define tagptr_fold(type, ptr, _tags) ({ \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \ - __bad_tagptr_tags(); \ -tagptr_init(type, (uintptr_t)(ptr) | tags); }) - -/* decode tagged pointers */ -#define tagptr_unfold_ptr(tptr) \ - ((void *)((tptr).v & ~__tagptr_mask(tptr))) - -#define tagptr_unfold_tags(tptr) \ - ((tptr).v & __tagptr_mask(tptr)) - -/* operations for the tagger pointer */ -#define tagptr_eq(_tptr1, _tptr2) ({ \ - typeof(_tptr1) tptr1 = (_tptr1); \ - typeof(_tptr2) tptr2 = (_tptr2); \ - (void)(&tptr1 == &tptr2); \ -(tptr1).v == (tptr2).v; }) - -/* lock-free CAS operation */ -#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - typeof(_o) o = (_o); \ - typeof(_n) n = (_n); \ - (void)(&o == &n); \ - (void)(&o == ptptr); \ -tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) - -/* wrap WRITE_ONCE if atomic update is needed */ -#define tagptr_replace_tags(_ptptr, tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \ -*ptptr; }) - -#define tagptr_set_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v |= tags; \ -*ptptr; }) - -#define tagptr_clear_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v &= ~tags; \ -*ptptr; }) - -#endif /* __EROFS_FS_TAGPTR_H */ - diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c deleted file mode 100644 index 814c2ee037ae..000000000000 --- a/drivers/staging/erofs/utils.c +++ /dev/null @@ -1,335 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/utils.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) -{ - struct page *page; - - if (!list_empty(pool)) { - page = lru_to_page(pool); - DBG_BUGON(page_ref_count(page) != 1); - list_del(&page->lru); - } else { - page = alloc_pages(gfp | (nofail ? 
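
erofs_allocpage(), quoted here, prefers recycling a page from the caller-supplied pool over hitting the page allocator, optionally passing __GFP_NOFAIL for callers that cannot tolerate failure. The same pool-first shape in userspace, with an intrusive free list (helper names hypothetical):

#include <assert.h>
#include <stdlib.h>

struct freenode { struct freenode *next; };

/* pop from the pool if possible, otherwise allocate; 'size' must be at
 * least sizeof(struct freenode) so freed buffers can carry the link */
static void *pool_alloc(struct freenode **pool, size_t size)
{
        if (*pool) {
                struct freenode *n = *pool;

                *pool = n->next;
                return n;
        }
        return malloc(size);
}

static void pool_free(struct freenode **pool, void *p)
{
        struct freenode *n = p;

        n->next = *pool;
        *pool = n;
}

int main(void)
{
        struct freenode *pool = NULL;
        void *a = pool_alloc(&pool, 64);

        pool_free(&pool, a);
        assert(pool_alloc(&pool, 64) == a);     /* recycled, not malloc'd */
        return 0;
}
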
__GFP_NOFAIL : 0), 0); - } - return page; -} - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -static struct { - u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; -} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; - -void *erofs_get_pcpubuf(unsigned int pagenr) -{ - preempt_disable(); - return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -/* global shrink count (for all mounted EROFS instances) */ -static atomic_long_t erofs_global_shrink_cnt; - -#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) -#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) - -static int erofs_workgroup_get(struct erofs_workgroup *grp) -{ - int o; - -repeat: - o = erofs_wait_on_workgroup_freezed(grp); - if (unlikely(o <= 0)) - return -1; - - if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) - goto repeat; - - /* decrease refcount paired by erofs_workgroup_put */ - if (unlikely(o == 1)) - atomic_long_dec(&erofs_global_shrink_cnt); - return 0; -} - -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - struct erofs_workgroup *grp; - -repeat: - rcu_read_lock(); - grp = radix_tree_lookup(&sbi->workstn_tree, index); - if (grp) { - *tag = xa_pointer_tag(grp); - grp = xa_untag_pointer(grp); - - if (erofs_workgroup_get(grp)) { - /* prefer to relax rcu read side */ - rcu_read_unlock(); - goto repeat; - } - - DBG_BUGON(index != grp->index); - } - rcu_read_unlock(); - return grp; -} - -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, - bool tag) -{ - struct erofs_sb_info *sbi; - int err; - - /* grp shouldn't be broken or used before */ - if (unlikely(atomic_read(&grp->refcount) != 1)) { - DBG_BUGON(1); - return -EINVAL; - } - - err = radix_tree_preload(GFP_NOFS); - if (err) - return err; - - sbi = EROFS_SB(sb); - xa_lock(&sbi->workstn_tree); - - grp = xa_tag_pointer(grp, tag); - - /* - * Bump up reference count before making this workgroup - * visible to other users in order to avoid potential UAF - * without serialized by workstn_lock. - */ - __erofs_workgroup_get(grp); - - err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); - if (unlikely(err)) - /* - * it's safe to decrease since the workgroup isn't visible - * and refcount >= 2 (cannot be freezed). - */ - __erofs_workgroup_put(grp); - - xa_unlock(&sbi->workstn_tree); - radix_tree_preload_end(); - return err; -} - -static void __erofs_workgroup_free(struct erofs_workgroup *grp) -{ - atomic_long_dec(&erofs_global_shrink_cnt); - erofs_workgroup_free_rcu(grp); -} - -int erofs_workgroup_put(struct erofs_workgroup *grp) -{ - int count = atomic_dec_return(&grp->refcount); - - if (count == 1) - atomic_long_inc(&erofs_global_shrink_cnt); - else if (!count) - __erofs_workgroup_free(grp); - return count; -} - -static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) -{ - erofs_workgroup_unfreeze(grp, 0); - __erofs_workgroup_free(grp); -} - -static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) -{ - /* - * If managed cache is on, refcount of workgroups - * themselves could be < 0 (freezed). In other words, - * there is no guarantee that all refcounts > 0. - */ - if (!erofs_workgroup_try_to_freeze(grp, 1)) - return false; - - /* - * Note that all cached pages should be unattached - * before deleted from the radix tree. 
Otherwise some - * cached pages could be still attached to the orphan - * old workgroup when the new one is available in the tree. - */ - if (erofs_try_to_free_all_cached_pages(sbi, grp)) { - erofs_workgroup_unfreeze(grp, 1); - return false; - } - - /* - * It's impossible to fail after the workgroup is freezed, - * however in order to avoid some race conditions, add a - * DBG_BUGON to observe this in advance. - */ - DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, - grp->index)) != grp); - - /* - * If managed cache is on, last refcount should indicate - * the related workstation. - */ - erofs_workgroup_unfreeze_final(grp); - return true; -} - -static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, - unsigned long nr_shrink, - bool cleanup) -{ - pgoff_t first_index = 0; - void *batch[PAGEVEC_SIZE]; - unsigned int freed = 0; - - int i, found; -repeat: - xa_lock(&sbi->workstn_tree); - - found = radix_tree_gang_lookup(&sbi->workstn_tree, - batch, first_index, PAGEVEC_SIZE); - - for (i = 0; i < found; ++i) { - struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); - - first_index = grp->index + 1; - - /* try to shrink each valid workgroup */ - if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) - continue; - - ++freed; - if (unlikely(!--nr_shrink)) - break; - } - xa_unlock(&sbi->workstn_tree); - - if (i && nr_shrink) - goto repeat; - return freed; -} - -/* protected by 'erofs_sb_list_lock' */ -static unsigned int shrinker_run_no; - -/* protects the mounted 'erofs_sb_list' */ -static DEFINE_SPINLOCK(erofs_sb_list_lock); -static LIST_HEAD(erofs_sb_list); - -void erofs_shrinker_register(struct super_block *sb) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - - mutex_init(&sbi->umount_mutex); - - spin_lock(&erofs_sb_list_lock); - list_add(&sbi->list, &erofs_sb_list); - spin_unlock(&erofs_sb_list_lock); -} - -void erofs_shrinker_unregister(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - mutex_lock(&sbi->umount_mutex); - erofs_shrink_workstation(sbi, ~0UL, true); - - spin_lock(&erofs_sb_list_lock); - list_del(&sbi->list); - spin_unlock(&erofs_sb_list_lock); - mutex_unlock(&sbi->umount_mutex); -} - -static unsigned long erofs_shrink_count(struct shrinker *shrink, - struct shrink_control *sc) -{ - return atomic_long_read(&erofs_global_shrink_cnt); -} - -static unsigned long erofs_shrink_scan(struct shrinker *shrink, - struct shrink_control *sc) -{ - struct erofs_sb_info *sbi; - struct list_head *p; - - unsigned long nr = sc->nr_to_scan; - unsigned int run_no; - unsigned long freed = 0; - - spin_lock(&erofs_sb_list_lock); - do { - run_no = ++shrinker_run_no; - } while (run_no == 0); - - /* Iterate over all mounted superblocks and try to shrink them */ - p = erofs_sb_list.next; - while (p != &erofs_sb_list) { - sbi = list_entry(p, struct erofs_sb_info, list); - - /* - * We move the ones we do to the end of the list, so we stop - * when we see one we have already done. - */ - if (sbi->shrinker_run_no == run_no) - break; - - if (!mutex_trylock(&sbi->umount_mutex)) { - p = p->next; - continue; - } - - spin_unlock(&erofs_sb_list_lock); - sbi->shrinker_run_no = run_no; - - freed += erofs_shrink_workstation(sbi, nr, false); - - spin_lock(&erofs_sb_list_lock); - /* Get the next list element before we move this one */ - p = p->next; - - /* - * Move this one to the end of the list to provide some - * fairness. 
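
The scan loop above avoids livelock while repeatedly dropping erofs_sb_list_lock: every superblock handled in this pass is stamped with run_no and rotated to the list tail, so the walk stops the moment it meets an entry it has already stamped. The stamping idea in isolation, on a plain singly linked list with no locking or tail rotation:

#include <stdio.h>

struct mnt {
        const char *name;
        unsigned int seen_run;
        struct mnt *next;
};

/* visit each mount at most once per pass, even if entries are requeued
 * behind the cursor between visits */
static void shrink_pass(struct mnt *head, unsigned int run_no)
{
        for (struct mnt *m = head; m && m->seen_run != run_no; m = m->next) {
                m->seen_run = run_no;
                printf("shrinking %s\n", m->name);
        }
}

int main(void)
{
        struct mnt b = { "sdb", 0, NULL }, a = { "sda", 0, &b };

        shrink_pass(&a, 1);
        shrink_pass(&a, 1);     /* everything already stamped: no output */
        return 0;
}
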
- */ - list_move_tail(&sbi->list, &erofs_sb_list); - mutex_unlock(&sbi->umount_mutex); - - if (freed >= nr) - break; - } - spin_unlock(&erofs_sb_list_lock); - return freed; -} - -static struct shrinker erofs_shrinker_info = { - .scan_objects = erofs_shrink_scan, - .count_objects = erofs_shrink_count, - .seeks = DEFAULT_SEEKS, -}; - -int __init erofs_init_shrinker(void) -{ - return register_shrinker(&erofs_shrinker_info); -} - -void erofs_exit_shrinker(void) -{ - unregister_shrinker(&erofs_shrinker_info); -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c deleted file mode 100644 index e7e5840e3f9d..000000000000 --- a/drivers/staging/erofs/xattr.c +++ /dev/null @@ -1,705 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/xattr.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include "xattr.h" - -struct xattr_iter { - struct super_block *sb; - struct page *page; - void *kaddr; - - erofs_blk_t blkaddr; - unsigned int ofs; -}; - -static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) -{ - /* the only user of kunmap() is 'init_inode_xattrs' */ - if (unlikely(!atomic)) - kunmap(it->page); - else - kunmap_atomic(it->kaddr); - - unlock_page(it->page); - put_page(it->page); -} - -static inline void xattr_iter_end_final(struct xattr_iter *it) -{ - if (!it->page) - return; - - xattr_iter_end(it, true); -} - -static int init_inode_xattrs(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct xattr_iter it; - unsigned int i; - struct erofs_xattr_ibody_header *ih; - struct super_block *sb; - struct erofs_sb_info *sbi; - bool atomic_map; - int ret = 0; - - /* the most case is that xattrs of this inode are initialized. */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - return 0; - - if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) - return -ERESTARTSYS; - - /* someone has initialized xattrs for us? */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - goto out_unlock; - - /* - * bypass all xattr operations if ->xattr_isize is not greater than - * sizeof(struct erofs_xattr_ibody_header), in detail: - * 1) it is not enough to contain erofs_xattr_ibody_header then - * ->xattr_isize should be 0 (it means no xattr); - * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk - * undefined right now (maybe use later with some new sb feature). 
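
The comment above distinguishes three degenerate xattr_isize values: zero means the inode simply has no xattrs, anything smaller than the ibody header is corruption, and exactly the header size is reserved on-disk and therefore rejected. The classification as a pure function, with a hypothetical header size and the Linux errno values these files map to (ENOATTR is ENODATA, EFSCORRUPTED is EUCLEAN):

#include <errno.h>

#define IBODY_HDR_SZ 12u        /* stand-in for the on-disk header size */

static int classify_xattr_isize(unsigned int xattr_isize)
{
        if (xattr_isize == 0)
                return -ENODATA;        /* no xattrs recorded at all */
        if (xattr_isize < IBODY_HDR_SZ)
                return -EUCLEAN;        /* cannot hold the header: corrupt */
        if (xattr_isize == IBODY_HDR_SZ)
                return -EOPNOTSUPP;     /* header-only layout is reserved */
        return 0;                       /* header plus real entries */
}
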
- */ - if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { - errln("xattr_isize %d of nid %llu is not supported yet", - vi->xattr_isize, vi->nid); - ret = -EOPNOTSUPP; - goto out_unlock; - } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { - if (unlikely(vi->xattr_isize)) { - errln("bogus xattr ibody @ nid %llu", vi->nid); - DBG_BUGON(1); - ret = -EFSCORRUPTED; - goto out_unlock; /* xattr ondisk layout error */ - } - ret = -ENOATTR; - goto out_unlock; - } - - sb = inode->i_sb; - sbi = EROFS_SB(sb); - it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); - it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); - - it.page = erofs_get_inline_page(inode, it.blkaddr); - if (IS_ERR(it.page)) { - ret = PTR_ERR(it.page); - goto out_unlock; - } - - /* read in shared xattr array (non-atomic, see kmalloc below) */ - it.kaddr = kmap(it.page); - atomic_map = false; - - ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); - - vi->xattr_shared_count = ih->h_shared_count; - vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, - sizeof(uint), GFP_KERNEL); - if (!vi->xattr_shared_xattrs) { - xattr_iter_end(&it, atomic_map); - ret = -ENOMEM; - goto out_unlock; - } - - /* let's skip ibody header */ - it.ofs += sizeof(struct erofs_xattr_ibody_header); - - for (i = 0; i < vi->xattr_shared_count; ++i) { - if (unlikely(it.ofs >= EROFS_BLKSIZ)) { - /* cannot be unaligned */ - DBG_BUGON(it.ofs != EROFS_BLKSIZ); - xattr_iter_end(&it, atomic_map); - - it.page = erofs_get_meta_page(sb, ++it.blkaddr, - S_ISDIR(inode->i_mode)); - if (IS_ERR(it.page)) { - kfree(vi->xattr_shared_xattrs); - vi->xattr_shared_xattrs = NULL; - ret = PTR_ERR(it.page); - goto out_unlock; - } - - it.kaddr = kmap_atomic(it.page); - atomic_map = true; - it.ofs = 0; - } - vi->xattr_shared_xattrs[i] = - le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); - it.ofs += sizeof(__le32); - } - xattr_iter_end(&it, atomic_map); - - set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); - -out_unlock: - clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); - return ret; -} - -/* - * the general idea for these return values is - * if 0 is returned, go on processing the current xattr; - * 1 (> 0) is returned, skip this round to process the next xattr; - * -err (< 0) is returned, an error (maybe ENOXATTR) occurred - * and need to be handled - */ -struct xattr_iter_handlers { - int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); - int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); - int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); - void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); -}; - -static inline int xattr_iter_fixup(struct xattr_iter *it) -{ - if (it->ofs < EROFS_BLKSIZ) - return 0; - - xattr_iter_end(it, true); - - it->blkaddr += erofs_blknr(it->ofs); - - it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); - if (IS_ERR(it->page)) { - int err = PTR_ERR(it->page); - - it->page = NULL; - return err; - } - - it->kaddr = kmap_atomic(it->page); - it->ofs = erofs_blkoff(it->ofs); - return 0; -} - -static int inline_xattr_iter_begin(struct xattr_iter *it, - struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); - unsigned int xattr_header_sz, inline_xattr_ofs; - - xattr_header_sz = inlinexattr_header_size(inode); - if (unlikely(xattr_header_sz >= vi->xattr_isize)) { - DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); - return -ENOATTR; - } - - inline_xattr_ofs = vi->inode_isize + xattr_header_sz; - - it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); - it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); - - it->page = erofs_get_inline_page(inode, it->blkaddr); - if (IS_ERR(it->page)) - return PTR_ERR(it->page); - - it->kaddr = kmap_atomic(it->page); - return vi->xattr_isize - xattr_header_sz; -} - -/* - * Regardless of success or failure, `xattr_foreach' will end up with - * `ofs' pointing to the next xattr item rather than an arbitrary position. - */ -static int xattr_foreach(struct xattr_iter *it, - const struct xattr_iter_handlers *op, - unsigned int *tlimit) -{ - struct erofs_xattr_entry entry; - unsigned int value_sz, processed, slice; - int err; - - /* 0. fixup blkaddr, ofs, ipage */ - err = xattr_iter_fixup(it); - if (err) - return err; - - /* - * 1. read xattr entry to the memory, - * since we do EROFS_XATTR_ALIGN - * therefore entry should be in the page - */ - entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); - if (tlimit) { - unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); - - /* xattr on-disk corruption: xattr entry beyond xattr_isize */ - if (unlikely(*tlimit < entry_sz)) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - *tlimit -= entry_sz; - } - - it->ofs += sizeof(struct erofs_xattr_entry); - value_sz = le16_to_cpu(entry.e_value_size); - - /* handle entry */ - err = op->entry(it, &entry); - if (err) { - it->ofs += entry.e_name_len + value_sz; - goto out; - } - - /* 2. handle xattr name (ofs will finally be at the end of name) */ - processed = 0; - - while (processed < entry.e_name_len) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - entry.e_name_len - processed); - - /* handle name */ - err = op->name(it, processed, it->kaddr + it->ofs, slice); - if (err) { - it->ofs += entry.e_name_len - processed + value_sz; - goto out; - } - - it->ofs += slice; - processed += slice; - } - - /* 3. handle xattr value */ - processed = 0; - - if (op->alloc_buffer) { - err = op->alloc_buffer(it, value_sz); - if (err) { - it->ofs += value_sz; - goto out; - } - } - - while (processed < value_sz) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - value_sz - processed); - op->value(it, processed, it->kaddr + it->ofs, slice); - it->ofs += slice; - processed += slice; - } - -out: - /* xattrs should be 4-byte aligned (on-disk constraint) */ - it->ofs = EROFS_XATTR_ALIGN(it->ofs); - return err < 0 ? err : 0; -} - -struct getxattr_iter { - struct xattr_iter it; - - char *buffer; - int buffer_size, index; - struct qstr name; -}; - -static int xattr_entrymatch(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return (it->index != entry->e_name_index || - it->name.len != entry->e_name_len) ? -ENOATTR : 0; -} - -static int xattr_namematch(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return memcmp(buf, it->name.name + processed, len) ? 
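
xattr_foreach() above streams names and values in 'slice'-sized pieces because an entry may straddle a block boundary: whenever ofs reaches the block size, xattr_iter_fixup() maps the next block and the copy resumes where it left off. The slicing arithmetic modeled in userspace (buffer layout illustrative):

#include <assert.h>
#include <string.h>

/* gather 'len' bytes starting at (block 0, ofs) from fixed-size blocks */
static void read_sliced(char *dst, const char *const blocks[],
                        unsigned int blksz, unsigned int ofs,
                        unsigned int len)
{
        unsigned int done = 0, blk = 0;

        while (done < len) {
                unsigned int slice = blksz - ofs;

                if (slice > len - done)
                        slice = len - done;
                memcpy(dst + done, blocks[blk] + ofs, slice);
                done += slice;
                ofs += slice;
                if (ofs == blksz) {     /* cross into the next block */
                        ofs = 0;
                        blk++;
                }
        }
}

int main(void)
{
        const char *const blocks[] = { "user.na", "me=1234" };
        char out[15];

        read_sliced(out, blocks, 7, 0, 14);
        out[14] = '\0';
        assert(strcmp(out, "user.name=1234") == 0);
        return 0;
}
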
-ENOATTR : 0; -} - -static int xattr_checkbuffer(struct xattr_iter *_it, - unsigned int value_sz) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - int err = it->buffer_size < value_sz ? -ERANGE : 0; - - it->buffer_size = value_sz; - return !it->buffer ? 1 : err; -} - -static void xattr_copyvalue(struct xattr_iter *_it, - unsigned int processed, - char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - memcpy(it->buffer + processed, buf, len); -} - -static const struct xattr_iter_handlers find_xattr_handlers = { - .entry = xattr_entrymatch, - .name = xattr_namematch, - .alloc_buffer = xattr_checkbuffer, - .value = xattr_copyvalue -}; - -static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, inode); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); - if (ret != -ENOATTR) - break; - } - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_size; -} - -static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = -ENOATTR; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); - if (ret != -ENOATTR) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? 
ret : it->buffer_size; -} - -static bool erofs_xattr_user_list(struct dentry *dentry) -{ - return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); -} - -static bool erofs_xattr_trusted_list(struct dentry *dentry) -{ - return capable(CAP_SYS_ADMIN); -} - -int erofs_getxattr(struct inode *inode, int index, - const char *name, - void *buffer, size_t buffer_size) -{ - int ret; - struct getxattr_iter it; - - if (unlikely(!name)) - return -EINVAL; - - ret = init_inode_xattrs(inode); - if (ret) - return ret; - - it.index = index; - - it.name.len = strlen(name); - if (it.name.len > EROFS_NAME_LEN) - return -ERANGE; - it.name.name = name; - - it.buffer = buffer; - it.buffer_size = buffer_size; - - it.it.sb = inode->i_sb; - ret = inline_getxattr(inode, &it); - if (ret == -ENOATTR) - ret = shared_getxattr(inode, &it); - return ret; -} - -static int erofs_xattr_generic_get(const struct xattr_handler *handler, - struct dentry *unused, struct inode *inode, - const char *name, void *buffer, size_t size) -{ - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - switch (handler->flags) { - case EROFS_XATTR_INDEX_USER: - if (!test_opt(sbi, XATTR_USER)) - return -EOPNOTSUPP; - break; - case EROFS_XATTR_INDEX_TRUSTED: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - break; - case EROFS_XATTR_INDEX_SECURITY: - break; - default: - return -EINVAL; - } - - return erofs_getxattr(inode, handler->flags, name, buffer, size); -} - -const struct xattr_handler erofs_xattr_user_handler = { - .prefix = XATTR_USER_PREFIX, - .flags = EROFS_XATTR_INDEX_USER, - .list = erofs_xattr_user_list, - .get = erofs_xattr_generic_get, -}; - -const struct xattr_handler erofs_xattr_trusted_handler = { - .prefix = XATTR_TRUSTED_PREFIX, - .flags = EROFS_XATTR_INDEX_TRUSTED, - .list = erofs_xattr_trusted_list, - .get = erofs_xattr_generic_get, -}; - -#ifdef CONFIG_EROFS_FS_SECURITY -const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = EROFS_XATTR_INDEX_SECURITY, - .get = erofs_xattr_generic_get, -}; -#endif - -const struct xattr_handler *erofs_xattr_handlers[] = { - &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - &posix_acl_access_xattr_handler, - &posix_acl_default_xattr_handler, -#endif - &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - &erofs_xattr_security_handler, -#endif - NULL, -}; - -struct listxattr_iter { - struct xattr_iter it; - - struct dentry *dentry; - char *buffer; - int buffer_size, buffer_ofs; -}; - -static int xattr_entrylist(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - unsigned int prefix_len; - const char *prefix; - - const struct xattr_handler *h = - erofs_xattr_handler(entry->e_name_index); - - if (!h || (h->list && !h->list(it->dentry))) - return 1; - - prefix = xattr_prefix(h); - prefix_len = strlen(prefix); - - if (!it->buffer) { - it->buffer_ofs += prefix_len + entry->e_name_len + 1; - return 1; - } - - if (it->buffer_ofs + prefix_len - + entry->e_name_len + 1 > it->buffer_size) - return -ERANGE; - - memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); - it->buffer_ofs += prefix_len; - return 0; -} - -static int xattr_namelist(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - memcpy(it->buffer + it->buffer_ofs, buf, len); - it->buffer_ofs += len; - return 0; -} - -static int xattr_skipvalue(struct 
xattr_iter *_it, - unsigned int value_sz) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - it->buffer[it->buffer_ofs++] = '\0'; - return 1; -} - -static const struct xattr_iter_handlers list_xattr_handlers = { - .entry = xattr_entrylist, - .name = xattr_namelist, - .alloc_buffer = xattr_skipvalue, - .value = NULL -}; - -static int inline_listxattr(struct listxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); - if (ret) - break; - } - xattr_iter_end_final(&it->it); - return ret ? ret : it->buffer_ofs; -} - -static int shared_listxattr(struct listxattr_iter *it) -{ - struct inode *const inode = d_inode(it->dentry); - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = 0; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); - if (ret) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_ofs; -} - -ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - int ret; - struct listxattr_iter it; - - ret = init_inode_xattrs(d_inode(dentry)); - if (ret) - return ret; - - it.dentry = dentry; - it.buffer = buffer; - it.buffer_size = buffer_size; - it.buffer_ofs = 0; - - it.it.sb = dentry->d_sb; - - ret = inline_listxattr(&it); - if (ret < 0 && ret != -ENOATTR) - return ret; - return shared_listxattr(&it); -} - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type) -{ - struct posix_acl *acl; - int prefix, rc; - char *value = NULL; - - switch (type) { - case ACL_TYPE_ACCESS: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; - break; - case ACL_TYPE_DEFAULT: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; - break; - default: - return ERR_PTR(-EINVAL); - } - - rc = erofs_getxattr(inode, prefix, "", NULL, 0); - if (rc > 0) { - value = kmalloc(rc, GFP_KERNEL); - if (!value) - return ERR_PTR(-ENOMEM); - rc = erofs_getxattr(inode, prefix, "", value, rc); - } - - if (rc == -ENOATTR) - acl = NULL; - else if (rc < 0) - acl = ERR_PTR(rc); - else - acl = posix_acl_from_xattr(&init_user_ns, value, rc); - kfree(value); - return acl; -} -#endif - diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h deleted file mode 100644 index e20249647541..000000000000 --- a/drivers/staging/erofs/xattr.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/xattr.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_XATTR_H -#define __EROFS_XATTR_H - -#include "internal.h" -#include -#include - -/* Attribute not found */ -#define ENOATTR ENODATA - -static inline unsigned int inlinexattr_header_size(struct inode *inode) -{ - return sizeof(struct erofs_xattr_ibody_header) - + sizeof(u32) * EROFS_V(inode)->xattr_shared_count; -} - -static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ -#ifdef CONFIG_EROFS_FS_XATTR - return sbi->xattr_blkaddr + - xattr_id * sizeof(__u32) / EROFS_BLKSIZ; -#else - return 0; -#endif -} - -static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ - return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ; -} - -#ifdef CONFIG_EROFS_FS_XATTR -extern const struct xattr_handler erofs_xattr_user_handler; -extern const struct xattr_handler erofs_xattr_trusted_handler; -#ifdef CONFIG_EROFS_FS_SECURITY -extern const struct xattr_handler erofs_xattr_security_handler; -#endif - -static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) -{ -static const struct xattr_handler *xattr_handler_map[] = { - [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, - [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = - &posix_acl_default_xattr_handler, -#endif - [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler, -#endif -}; - - return idx && idx < ARRAY_SIZE(xattr_handler_map) ? - xattr_handler_map[idx] : NULL; -} - -extern const struct xattr_handler *erofs_xattr_handlers[]; - -int erofs_getxattr(struct inode *, int, const char *, void *, size_t); -ssize_t erofs_listxattr(struct dentry *, char *, size_t); -#else -static inline int erofs_getxattr(struct inode *inode, int index, - const char *name, void *buffer, - size_t buffer_size) -{ - return -EOPNOTSUPP; -} - -static inline ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_XATTR */ - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type); -#else -#define erofs_get_acl (NULL) -#endif - -#endif - diff --git a/drivers/staging/erofs/zdata.c b/drivers/staging/erofs/zdata.c deleted file mode 100644 index 60d7c20db87d..000000000000 --- a/drivers/staging/erofs/zdata.c +++ /dev/null @@ -1,1434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zdata.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "zdata.h" -#include "compress.h" -#include - -#include - -/* - * a compressed_pages[] placeholder in order to avoid - * being filled with file pages for in-place decompression. 
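
xattrblock_addr() and xattrblock_offset() above treat a shared xattr id as an index into a flat array of __le32 slots starting at xattr_blkaddr, so the block number and in-block offset fall out of plain division and remainder. A sketch of the arithmetic with a fixed 4 KiB block (values illustrative):

#include <assert.h>

#define BLKSZ 4096u

static void xattr_id_to_pos(unsigned int xattr_blkaddr, unsigned int id,
                            unsigned int *blk, unsigned int *off)
{
        *blk = xattr_blkaddr + id * 4 / BLKSZ;  /* 4 == sizeof(__le32) */
        *off = id * 4 % BLKSZ;
}

int main(void)
{
        unsigned int blk, off;

        xattr_id_to_pos(10, 1024, &blk, &off);  /* entry 1024 sits 4096 bytes in */
        assert(blk == 11 && off == 0);
        return 0;
}
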
- */ -#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D) - -/* how to allocate cached pages for a pcluster */ -enum z_erofs_cache_alloctype { - DONTALLOC, /* don't allocate any cached pages */ - DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */ -}; - -/* - * tagged pointer with 1-bit tag for all compressed pages - * tag 0 - the page is just found with an extra page reference - */ -typedef tagptr1_t compressed_page_t; - -#define tag_compressed_page_justfound(page) \ - tagptr_fold(compressed_page_t, page, 1) - -static struct workqueue_struct *z_erofs_workqueue __read_mostly; -static struct kmem_cache *pcluster_cachep __read_mostly; - -void z_erofs_exit_zip_subsystem(void) -{ - destroy_workqueue(z_erofs_workqueue); - kmem_cache_destroy(pcluster_cachep); -} - -static inline int init_unzip_workqueue(void) -{ - const unsigned int onlinecpus = num_possible_cpus(); - const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; - - /* - * no need to spawn too many threads, limiting threads could minimum - * scheduling overhead, perhaps per-CPU threads should be better? - */ - z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, - onlinecpus + onlinecpus / 4); - return z_erofs_workqueue ? 0 : -ENOMEM; -} - -static void init_once(void *ptr) -{ - struct z_erofs_pcluster *pcl = ptr; - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - unsigned int i; - - mutex_init(&cl->lock); - cl->nr_pages = 0; - cl->vcnt = 0; - for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) - pcl->compressed_pages[i] = NULL; -} - -static void init_always(struct z_erofs_pcluster *pcl) -{ - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - - atomic_set(&pcl->obj.refcount, 1); - - DBG_BUGON(cl->nr_pages); - DBG_BUGON(cl->vcnt); -} - -int __init z_erofs_init_zip_subsystem(void) -{ - pcluster_cachep = kmem_cache_create("erofs_compress", - Z_EROFS_WORKGROUP_SIZE, 0, - SLAB_RECLAIM_ACCOUNT, init_once); - if (pcluster_cachep) { - if (!init_unzip_workqueue()) - return 0; - - kmem_cache_destroy(pcluster_cachep); - } - return -ENOMEM; -} - -enum z_erofs_collectmode { - COLLECT_SECONDARY, - COLLECT_PRIMARY, - /* - * The current collection was the tail of an exist chain, in addition - * that the previous processed chained collections are all decided to - * be hooked up to it. 
- * A new chain will be created for the remaining collections which are - * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, - * the next collection cannot reuse the whole page safely in - * the following scenario: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (belongs to the next cl) | (belongs to the current cl) | - * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| - */ - COLLECT_PRIMARY_HOOKED, - COLLECT_PRIMARY_FOLLOWED_NOINPLACE, - /* - * The current collection has been linked with the owned chain, and - * could also be linked with the remaining collections, which means - * if the processing page is the tail page of the collection, thus - * the current collection can safely use the whole page (since - * the previous collection is under control) for in-place I/O, as - * illustrated below: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (of the current cl) | (of the previous collection) | - * | PRIMARY_FOLLOWED or | | - * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| - * - * [ (*) the above page can be used as inplace I/O. ] - */ - COLLECT_PRIMARY_FOLLOWED, -}; - -struct z_erofs_collector { - struct z_erofs_pagevec_ctor vector; - - struct z_erofs_pcluster *pcl, *tailpcl; - struct z_erofs_collection *cl; - struct page **compressedpages; - z_erofs_next_pcluster_t owned_head; - - enum z_erofs_collectmode mode; -}; - -struct z_erofs_decompress_frontend { - struct inode *const inode; - - struct z_erofs_collector clt; - struct erofs_map_blocks map; - - /* used for applying cache strategy on the fly */ - bool backmost; - erofs_off_t headoffset; -}; - -#define COLLECTOR_INIT() { \ - .owned_head = Z_EROFS_PCLUSTER_TAIL, \ - .mode = COLLECT_PRIMARY_FOLLOWED } - -#define DECOMPRESS_FRONTEND_INIT(__i) { \ - .inode = __i, .clt = COLLECTOR_INIT(), \ - .backmost = true, } - -static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; -static DEFINE_MUTEX(z_pagemap_global_lock); - -static void preload_compressed_pages(struct z_erofs_collector *clt, - struct address_space *mc, - enum z_erofs_cache_alloctype type, - struct list_head *pagepool) -{ - const struct z_erofs_pcluster *pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct page **pages = clt->compressedpages; - pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); - bool standalone = true; - - if (clt->mode < COLLECT_PRIMARY_FOLLOWED) - return; - - for (; pages < pcl->compressed_pages + clusterpages; ++pages) { - struct page *page; - compressed_page_t t; - - /* the compressed page was loaded before */ - if (READ_ONCE(*pages)) - continue; - - page = find_get_page(mc, index); - - if (page) { - t = tag_compressed_page_justfound(page); - } else if (type == DELAYEDALLOC) { - t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); - } else { /* DONTALLOC */ - if (standalone) - clt->compressedpages = pages; - standalone = false; - continue; - } - - if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) - continue; - - if (page) - put_page(page); - } - - if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; -} - -/* called by erofs_shrinker to get rid of all compressed_pages */ -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp) -{ - struct z_erofs_pcluster *const pcl = - container_of(grp, 
struct z_erofs_pcluster, obj); - struct address_space *const mapping = MNGD_MAPPING(sbi); - const unsigned int clusterpages = BIT(pcl->clusterbits); - int i; - - /* - * refcount of workgroup is now freezed as 1, - * therefore no need to worry about available decompression users. - */ - for (i = 0; i < clusterpages; ++i) { - struct page *page = pcl->compressed_pages[i]; - - if (!page) - continue; - - /* block other users from reclaiming or migrating the page */ - if (!trylock_page(page)) - return -EBUSY; - - if (unlikely(page->mapping != mapping)) - continue; - - /* barrier is implied in the following 'unlock_page' */ - WRITE_ONCE(pcl->compressed_pages[i], NULL); - set_page_private(page, 0); - ClearPagePrivate(page); - - unlock_page(page); - put_page(page); - } - return 0; -} - -int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page) -{ - struct z_erofs_pcluster *const pcl = (void *)page_private(page); - const unsigned int clusterpages = BIT(pcl->clusterbits); - int ret = 0; /* 0 - busy */ - - if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { - unsigned int i; - - for (i = 0; i < clusterpages; ++i) { - if (pcl->compressed_pages[i] == page) { - WRITE_ONCE(pcl->compressed_pages[i], NULL); - ret = 1; - break; - } - } - erofs_workgroup_unfreeze(&pcl->obj, 1); - - if (ret) { - ClearPagePrivate(page); - put_page(page); - } - } - return ret; -} - -/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ -static inline bool try_inplace_io(struct z_erofs_collector *clt, - struct page *page) -{ - struct z_erofs_pcluster *const pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - - while (clt->compressedpages < pcl->compressed_pages + clusterpages) { - if (!cmpxchg(clt->compressedpages++, NULL, page)) - return true; - } - return false; -} - -/* callers must be with collection lock held */ -static int z_erofs_attach_page(struct z_erofs_collector *clt, - struct page *page, - enum z_erofs_page_type type) -{ - int ret; - bool occupied; - - /* give priority for inplaceio */ - if (clt->mode >= COLLECT_PRIMARY && - type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && - try_inplace_io(clt, page)) - return 0; - - ret = z_erofs_pagevec_enqueue(&clt->vector, - page, type, &occupied); - clt->cl->vcnt += (unsigned int)ret; - - return ret ? 0 : -EAGAIN; -} - -static enum z_erofs_collectmode -try_to_claim_pcluster(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t *owned_head) -{ - /* let's claim these following types of pclusters */ -retry: - if (pcl->next == Z_EROFS_PCLUSTER_NIL) { - /* type 1, nil pcluster */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, - *owned_head) != Z_EROFS_PCLUSTER_NIL) - goto retry; - - *owned_head = &pcl->next; - /* lucky, I am the followee :) */ - return COLLECT_PRIMARY_FOLLOWED; - } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { - /* - * type 2, link to the end of a existing open chain, - * be careful that its submission itself is governed - * by the original owned chain. 
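
try_to_claim_pcluster() above chains pclusters for submission using nothing but cmpxchg on ->next: Z_EROFS_PCLUSTER_NIL means unchained (claim it, type 1), Z_EROFS_PCLUSTER_TAIL means the open end of an existing chain (hook onto it, type 2), and any other value leaves the pcluster with its current owner. A C11 sketch of the type 1 claim (sentinel and names hypothetical):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define PCLUSTER_NIL ((void *)0)        /* not on any chain */

struct pcl { _Atomic(void *) next; };

/* claim an unchained pcluster for our chain; on a lost race the caller
 * rereads ->next and retries, as the retry label above does */
static bool try_claim_nil(struct pcl *p, void *owned_head)
{
        void *expected = PCLUSTER_NIL;

        return atomic_compare_exchange_strong(&p->next, &expected,
                                              owned_head);
}

int main(void)
{
        struct pcl p = { .next = PCLUSTER_NIL };
        void *head = &p;        /* stand-in for the collector's owned_head */

        assert(try_claim_nil(&p, head));
        assert(!try_claim_nil(&p, head));       /* already claimed */
        return 0;
}
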
- */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - *owned_head) != Z_EROFS_PCLUSTER_TAIL) - goto retry; - *owned_head = Z_EROFS_PCLUSTER_TAIL; - return COLLECT_PRIMARY_HOOKED; - } - return COLLECT_PRIMARY; /* :( better luck next time */ -} - -static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct erofs_workgroup *grp; - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - unsigned int length; - bool tag; - - grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); - if (!grp) - return NULL; - - pcl = container_of(grp, struct z_erofs_pcluster, obj); - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - cl = z_erofs_primarycollection(pcl); - if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - length = READ_ONCE(pcl->length); - if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { - if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - } else { - unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; - - if (map->m_flags & EROFS_MAP_FULL_MAPPED) - llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; - - while (llen > length && - length != cmpxchg_relaxed(&pcl->length, length, llen)) { - cpu_relax(); - length = READ_ONCE(pcl->length); - } - } - mutex_lock(&cl->lock); - /* used to check tail merging loop due to corrupted images */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = pcl; - clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); - /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = NULL; - clt->pcl = pcl; - clt->cl = cl; - return cl; -} - -static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - int err; - - /* no available workgroup, let's allocate one */ - pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); - if (unlikely(!pcl)) - return ERR_PTR(-ENOMEM); - - init_always(pcl); - pcl->obj.index = map->m_pa >> PAGE_SHIFT; - - pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | - (map->m_flags & EROFS_MAP_FULL_MAPPED ? - Z_EROFS_PCLUSTER_FULL_LENGTH : 0); - - if (map->m_flags & EROFS_MAP_ZIPPED) - pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; - else - pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; - - pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; - pcl->clusterbits -= PAGE_SHIFT; - - /* new pclusters should be claimed as type 1, primary and followed */ - pcl->next = clt->owned_head; - clt->mode = COLLECT_PRIMARY_FOLLOWED; - - cl = z_erofs_primarycollection(pcl); - cl->pageofs = map->m_la & ~PAGE_MASK; - - /* - * lock all primary followed works before visible to others - * and mutex_trylock *never* fails for a new pcluster. 
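-	 * (this pcluster was only just allocated and has not been
-	 * registered via erofs_register_workgroup() yet, so no one else
-	 * can contend for cl->lock at this point.)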
- */
-	mutex_trylock(&cl->lock);
-
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
-	if (err) {
-		mutex_unlock(&cl->lock);
-		kmem_cache_free(pcluster_cachep, pcl);
-		return ERR_PTR(-EAGAIN);
-	}
-	/* used to check tail merging loop due to corrupted images */
-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
-		clt->tailpcl = pcl;
-	clt->owned_head = &pcl->next;
-	clt->pcl = pcl;
-	clt->cl = cl;
-	return cl;
-}
-
-static int z_erofs_collector_begin(struct z_erofs_collector *clt,
-				   struct inode *inode,
-				   struct erofs_map_blocks *map)
-{
-	struct z_erofs_collection *cl;
-
-	DBG_BUGON(clt->cl);
-
-	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
-	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
-	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
-	if (!PAGE_ALIGNED(map->m_pa)) {
-		DBG_BUGON(1);
-		return -EINVAL;
-	}
-
-repeat:
-	cl = cllookup(clt, inode, map);
-	if (!cl) {
-		cl = clregister(clt, inode, map);
-
-		if (unlikely(cl == ERR_PTR(-EAGAIN)))
-			goto repeat;
-	}
-
-	if (IS_ERR(cl))
-		return PTR_ERR(cl);
-
-	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-				  cl->pagevec, cl->vcnt);
-
-	clt->compressedpages = clt->pcl->compressed_pages;
-	if (clt->mode <= COLLECT_PRIMARY)	/* cannot do in-place I/O */
-		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
-	return 0;
-}
-
-/*
- * keep in mind that referenced pclusters are freed only after
- * an RCU grace period.
- */
-static void z_erofs_rcu_callback(struct rcu_head *head)
-{
-	struct z_erofs_collection *const cl =
-		container_of(head, struct z_erofs_collection, rcu);
-
-	kmem_cache_free(pcluster_cachep,
-			container_of(cl, struct z_erofs_pcluster,
-				     primary_collection));
-}
-
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
-	struct z_erofs_pcluster *const pcl =
-		container_of(grp, struct z_erofs_pcluster, obj);
-	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
-
-	call_rcu(&cl->rcu, z_erofs_rcu_callback);
-}
-
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
-{
-	struct z_erofs_pcluster *const pcl =
-		container_of(cl, struct z_erofs_pcluster, primary_collection);
-
-	erofs_workgroup_put(&pcl->obj);
-}
-
-static bool z_erofs_collector_end(struct z_erofs_collector *clt)
-{
-	struct z_erofs_collection *cl = clt->cl;
-
-	if (!cl)
-		return false;
-
-	z_erofs_pagevec_ctor_exit(&clt->vector, false);
-	mutex_unlock(&cl->lock);
-
-	/*
-	 * once all pending pages are added, don't hold the collection's
-	 * reference any longer if the pcluster isn't hosted by ourselves.
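-	 * (in PRIMARY_FOLLOWED or FOLLOWED_NOINPLACE mode this context
-	 * owns the chain, so the reference is dropped later, once the
-	 * whole chain has been decompressed.)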
- */ - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) - z_erofs_collection_put(cl); - - clt->cl = NULL; - return true; -} - -static inline struct page *__stagingpage_alloc(struct list_head *pagepool, - gfp_t gfp) -{ - struct page *page = erofs_allocpage(pagepool, gfp, true); - - page->mapping = Z_EROFS_MAPPING_STAGING; - return page; -} - -static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, - unsigned int cachestrategy, - erofs_off_t la) -{ - if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) - return false; - - if (fe->backmost) - return true; - - return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && - la < fe->headoffset; -} - -static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page, - struct list_head *pagepool) -{ - struct inode *const inode = fe->inode; - struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); - struct erofs_map_blocks *const map = &fe->map; - struct z_erofs_collector *const clt = &fe->clt; - const loff_t offset = page_offset(page); - bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); - - enum z_erofs_cache_alloctype cache_strategy; - enum z_erofs_page_type page_type; - unsigned int cur, end, spiltted, index; - int err = 0; - - /* register locked file pages as online pages in pack */ - z_erofs_onlinepage_init(page); - - spiltted = 0; - end = PAGE_SIZE; -repeat: - cur = end - 1; - - /* lucky, within the range of the current map_blocks */ - if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) { - /* didn't get a valid collection previously (very rare) */ - if (!clt->cl) - goto restart_now; - goto hitted; - } - - /* go ahead the next map_blocks */ - debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); - - if (z_erofs_collector_end(clt)) - fe->backmost = false; - - map->m_la = offset + cur; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (unlikely(err)) - goto err_out; - -restart_now: - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) - goto hitted; - - err = z_erofs_collector_begin(clt, inode, map); - if (unlikely(err)) - goto err_out; - - /* preload all compressed pages (maybe downgrade role if necessary) */ - if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) - cache_strategy = DELAYEDALLOC; - else - cache_strategy = DONTALLOC; - - preload_compressed_pages(clt, MNGD_MAPPING(sbi), - cache_strategy, pagepool); - - tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); -hitted: - cur = end - min_t(unsigned int, offset + end - map->m_la, end); - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, cur, end); - goto next_part; - } - - /* let's derive page type */ - page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : - (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : - (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); - - if (cur) - tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); - -retry: - err = z_erofs_attach_page(clt, page, page_type); - /* should allocate an additional staging page for pagevec */ - if (err == -EAGAIN) { - struct page *const newpage = - __stagingpage_alloc(pagepool, GFP_NOFS); - - err = z_erofs_attach_page(clt, newpage, - Z_EROFS_PAGE_TYPE_EXCLUSIVE); - if (likely(!err)) - goto retry; - } - - if (unlikely(err)) - goto err_out; - - index = page->index - (map->m_la >> PAGE_SHIFT); - - z_erofs_onlinepage_fixup(page, index, true); - - /* bump up the number of spiltted parts of a page */ - ++spiltted; - /* also update nr_pages */ - clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); -next_part: - /* can be used for verification */ - map->m_llen = offset + cur - map->m_la; - - end = cur; - if (end > 0) - goto repeat; - -out: - z_erofs_onlinepage_endio(page); - - debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", - __func__, page, spiltted, map->m_llen); - return err; - - /* if some error occurred while processing this page */ -err_out: - SetPageError(page); - goto out; -} - -static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) -{ - tagptr1_t t = tagptr_init(tagptr1_t, ptr); - struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); - bool background = tagptr_unfold_tags(t); - - if (!background) { - unsigned long flags; - - spin_lock_irqsave(&io->u.wait.lock, flags); - if (!atomic_add_return(bios, &io->pending_bios)) - wake_up_locked(&io->u.wait); - spin_unlock_irqrestore(&io->u.wait.lock, flags); - return; - } - - if (!atomic_add_return(bios, &io->pending_bios)) - queue_work(z_erofs_workqueue, &io->u.work); -} - -static inline void z_erofs_vle_read_endio(struct bio *bio) -{ - struct erofs_sb_info *sbi = NULL; - blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - bool cachemngd = false; - - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!page->mapping); - - if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { - sbi = EROFS_SB(page->mapping->host->i_sb); - - if (time_to_inject(sbi, FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - } - - /* sbi should already be gotten if the page is managed */ - if (sbi) - cachemngd = erofs_page_is_managed(sbi, page); - - if (unlikely(err)) - SetPageError(page); - else if (cachemngd) - SetPageUptodate(page); - - if (cachemngd) - unlock_page(page); - } - - z_erofs_vle_unzip_kickoff(bio->bi_private, -1); - bio_put(bio); -} - -static int z_erofs_decompress_pcluster(struct super_block *sb, - struct z_erofs_pcluster *pcl, - struct list_head *pagepool) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct z_erofs_pagevec_ctor ctor; - unsigned int i, outputsize, llen, nr_pages; - struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; - struct page **pages, **compressed_pages, *page; - - enum z_erofs_page_type page_type; - bool overlapped, partial; - struct z_erofs_collection *cl; - int err; - - might_sleep(); - cl = z_erofs_primarycollection(pcl); - DBG_BUGON(!READ_ONCE(cl->nr_pages)); - - mutex_lock(&cl->lock); - nr_pages = cl->nr_pages; - - if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { - pages = pages_onstack; - } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && - mutex_trylock(&z_pagemap_global_lock)) { - pages = 
z_pagemap_global; - } else { - gfp_t gfp_flags = GFP_KERNEL; - - if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES) - gfp_flags |= __GFP_NOFAIL; - - pages = kvmalloc_array(nr_pages, sizeof(struct page *), - gfp_flags); - - /* fallback to global pagemap for the lowmem scenario */ - if (unlikely(!pages)) { - mutex_lock(&z_pagemap_global_lock); - pages = z_pagemap_global; - } - } - - for (i = 0; i < nr_pages; ++i) - pages[i] = NULL; - - err = 0; - z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, 0); - - for (i = 0; i < cl->vcnt; ++i) { - unsigned int pagenr; - - page = z_erofs_pagevec_dequeue(&ctor, &page_type); - - /* all pages in pagevec ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD) - pagenr = 0; - else - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - - /* - * currently EROFS doesn't support multiref(dedup), - * so here erroring out one multiref page. - */ - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - } - z_erofs_pagevec_ctor_exit(&ctor, true); - - overlapped = false; - compressed_pages = pcl->compressed_pages; - - for (i = 0; i < clusterpages; ++i) { - unsigned int pagenr; - - page = compressed_pages[i]; - - /* all compressed pages ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (!z_erofs_page_is_staging(page)) { - if (erofs_page_is_managed(sbi, page)) { - if (unlikely(!PageUptodate(page))) - err = -EIO; - continue; - } - - /* - * only if non-head page can be selected - * for inplace decompression - */ - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - - overlapped = true; - } - - /* PG_error needs checking for inplaced and staging pages */ - if (unlikely(PageError(page))) { - DBG_BUGON(PageUptodate(page)); - err = -EIO; - } - } - - if (unlikely(err)) - goto out; - - llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; - if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { - outputsize = llen; - partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); - } else { - outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; - partial = true; - } - - err = z_erofs_decompress(&(struct z_erofs_decompress_req) { - .sb = sb, - .in = compressed_pages, - .out = pages, - .pageofs_out = cl->pageofs, - .inputsize = PAGE_SIZE, - .outputsize = outputsize, - .alg = pcl->algorithmformat, - .inplace_io = overlapped, - .partial_decoding = partial - }, pagepool); - -out: - /* must handle all compressed pages before endding pages */ - for (i = 0; i < clusterpages; ++i) { - page = compressed_pages[i]; - - if (erofs_page_is_managed(sbi, page)) - continue; - - /* recycle all individual staging pages */ - (void)z_erofs_put_stagingpage(pagepool, page); - - WRITE_ONCE(compressed_pages[i], NULL); - } - - for (i = 0; i < nr_pages; ++i) { - page = pages[i]; - if (!page) - continue; - - DBG_BUGON(!page->mapping); - - /* recycle all individual staging pages */ - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (unlikely(err < 0)) - SetPageError(page); - - z_erofs_onlinepage_endio(page); - } - - if (pages == z_pagemap_global) - mutex_unlock(&z_pagemap_global_lock); - else if 
(unlikely(pages != pages_onstack)) - kvfree(pages); - - cl->nr_pages = 0; - cl->vcnt = 0; - - /* all cl locks MUST be taken before the following line */ - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); - - /* all cl locks SHOULD be released right now */ - mutex_unlock(&cl->lock); - - z_erofs_collection_put(cl); - return err; -} - -static void z_erofs_vle_unzip_all(struct super_block *sb, - struct z_erofs_unzip_io *io, - struct list_head *pagepool) -{ - z_erofs_next_pcluster_t owned = io->head; - - while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { - struct z_erofs_pcluster *pcl; - - /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ - DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); - - /* no possible that 'owned' equals NULL */ - DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); - - pcl = container_of(owned, struct z_erofs_pcluster, next); - owned = READ_ONCE(pcl->next); - - z_erofs_decompress_pcluster(sb, pcl, pagepool); - } -} - -static void z_erofs_vle_unzip_wq(struct work_struct *work) -{ - struct z_erofs_unzip_io_sb *iosb = - container_of(work, struct z_erofs_unzip_io_sb, io.u.work); - LIST_HEAD(pagepool); - - DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool); - - put_pages_list(&pagepool); - kvfree(iosb); -} - -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct list_head *pagepool, - struct address_space *mc, - gfp_t gfp) -{ - /* determined at compile time to avoid too many #ifdefs */ - const bool nocache = __builtin_constant_p(mc) ? !mc : false; - const pgoff_t index = pcl->obj.index; - bool tocache = false; - - struct address_space *mapping; - struct page *oldpage, *page; - - compressed_page_t t; - int justfound; - -repeat: - page = READ_ONCE(pcl->compressed_pages[nr]); - oldpage = page; - - if (!page) - goto out_allocpage; - - /* - * the cached page has not been allocated and - * an placeholder is out there, prepare it now. - */ - if (!nocache && page == PAGE_UNALLOCATED) { - tocache = true; - goto out_allocpage; - } - - /* process the target tagged pointer */ - t = tagptr_init(compressed_page_t, page); - justfound = tagptr_unfold_tags(t); - page = tagptr_unfold_ptr(t); - - mapping = READ_ONCE(page->mapping); - - /* - * if managed cache is disabled, it's no way to - * get such a cached-like page. - */ - if (nocache) { - /* if managed cache is disabled, it is impossible `justfound' */ - DBG_BUGON(justfound); - - /* and it should be locked, not uptodate, and not truncated */ - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!mapping); - goto out; - } - - /* - * unmanaged (file) pages are all locked solidly, - * therefore it is impossible for `mapping' to be NULL. - */ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - lock_page(page); - - /* only true if page reclaim goes wrong, should never happen */ - DBG_BUGON(justfound && PagePrivate(page)); - - /* the page is still in manage cache */ - if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_pages[nr], page); - - ClearPageError(page); - if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_pages[]. 
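-			 * in other words, a page found in
-			 * compressed_pages[] without PagePrivate set can
-			 * only be one that the page cache lookup has just
-			 * returned (`justfound'), hence the assertion below.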
- */ - DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); - } - - /* no need to submit io if it is already up-to-date */ - if (PageUptodate(page)) { - unlock_page(page); - page = NULL; - } - goto out; - } - - /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. - */ - DBG_BUGON(page->mapping); - DBG_BUGON(!justfound); - - tocache = true; - unlock_page(page); - put_page(page); -out_allocpage: - page = __stagingpage_alloc(pagepool, gfp); - if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { - list_add(&page->lru, pagepool); - cpu_relax(); - goto repeat; - } - if (nocache || !tocache) - goto out; - if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { - page->mapping = Z_EROFS_MAPPING_STAGING; - goto out; - } - - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); -out: /* the only exit (for tracing and debugging) */ - return page; -} - -static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, - struct z_erofs_unzip_io *io, - bool foreground) -{ - struct z_erofs_unzip_io_sb *iosb; - - if (foreground) { - /* waitqueue available for foreground io */ - DBG_BUGON(!io); - - init_waitqueue_head(&io->u.wait); - atomic_set(&io->pending_bios, 0); - goto out; - } - - iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); - DBG_BUGON(!iosb); - - /* initialize fields in the allocated descriptor */ - io = &iosb->io; - iosb->sb = sb; - INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); -out: - io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - return io; -} - -/* define decompression jobqueue types */ -enum { - JQ_BYPASS, - JQ_SUBMIT, - NR_JOBQUEUES, -}; - -static void *jobqueueset_init(struct super_block *sb, - z_erofs_next_pcluster_t qtail[], - struct z_erofs_unzip_io *q[], - struct z_erofs_unzip_io *fgq, - bool forcefg) -{ - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ - q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); - qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; - - q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); - qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; - - return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); -} - -static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t qtail[], - z_erofs_next_pcluster_t owned_head) -{ - z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; - z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - if (owned_head == Z_EROFS_PCLUSTER_TAIL) - owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); - - WRITE_ONCE(*submit_qtail, owned_head); - WRITE_ONCE(*bypass_qtail, &pcl->next); - - qtail[JQ_BYPASS] = &pcl->next; -} - -static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], - unsigned int nr_bios, - bool force_fg) -{ - /* - * although background is preferred, no one is pending for submission. - * don't issue workqueue for decompression but drop it directly instead. 
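-	 * in this case all pclusters were moved to the bypass queue,
-	 * so the submit jobqueue descriptor allocated for background
-	 * use is simply freed below.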
- */ - if (force_fg || nr_bios) - return false; - - kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); - return true; -} - -static bool z_erofs_vle_submit_all(struct super_block *sb, - z_erofs_next_pcluster_t owned_head, - struct list_head *pagepool, - struct z_erofs_unzip_io *fgq, - bool force_fg) -{ - struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); - z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; - struct z_erofs_unzip_io *q[NR_JOBQUEUES]; - struct bio *bio; - void *bi_private; - /* since bio will be NULL, no need to initialize last_index */ - pgoff_t uninitialized_var(last_index); - bool force_submit = false; - unsigned int nr_bios; - - if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL)) - return false; - - force_submit = false; - bio = NULL; - nr_bios = 0; - bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); - - /* by default, all need io submission */ - q[JQ_SUBMIT]->head = owned_head; - - do { - struct z_erofs_pcluster *pcl; - unsigned int clusterpages; - pgoff_t first_index; - struct page *page; - unsigned int i = 0, bypass = 0; - int err; - - /* no possible 'owned_head' equals the following */ - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); - - pcl = container_of(owned_head, struct z_erofs_pcluster, next); - - clusterpages = BIT(pcl->clusterbits); - - /* close the main owned chain at first */ - owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - Z_EROFS_PCLUSTER_TAIL_CLOSED); - - first_index = pcl->obj.index; - force_submit |= (first_index != last_index + 1); - -repeat: - page = pickup_page_for_submission(pcl, i, pagepool, - MNGD_MAPPING(sbi), - GFP_NOFS); - if (!page) { - force_submit = true; - ++bypass; - goto skippage; - } - - if (bio && force_submit) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - bio = erofs_grab_bio(sb, first_index + i, - BIO_MAX_PAGES, bi_private, - z_erofs_vle_read_endio, true); - ++nr_bios; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (err < PAGE_SIZE) - goto submit_bio_retry; - - force_submit = false; - last_index = first_index + i; -skippage: - if (++i < clusterpages) - goto repeat; - - if (bypass < clusterpages) - qtail[JQ_SUBMIT] = &pcl->next; - else - move_to_bypass_jobqueue(pcl, qtail, owned_head); - } while (owned_head != Z_EROFS_PCLUSTER_TAIL); - - if (bio) - __submit_bio(bio, REQ_OP_READ, 0); - - if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) - return true; - - z_erofs_vle_unzip_kickoff(bi_private, nr_bios); - return true; -} - -static void z_erofs_submit_and_unzip(struct super_block *sb, - struct z_erofs_collector *clt, - struct list_head *pagepool, - bool force_fg) -{ - struct z_erofs_unzip_io io[NR_JOBQUEUES]; - - if (!z_erofs_vle_submit_all(sb, clt->owned_head, - pagepool, io, force_fg)) - return; - - /* decompress no I/O pclusters immediately */ - z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); - - if (!force_fg) - return; - - /* wait until all bios are completed */ - wait_event(io[JQ_SUBMIT].u.wait, - !atomic_read(&io[JQ_SUBMIT].pending_bios)); - - /* let's synchronous decompression */ - z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); -} - -static int z_erofs_vle_normalaccess_readpage(struct file *file, - struct page *page) -{ - struct inode *const inode = page->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - int err; - LIST_HEAD(pagepool); - - trace_erofs_readpage(page, false); - - f.headoffset = (erofs_off_t)page->index << 
PAGE_SHIFT; - - err = z_erofs_do_read_page(&f, page, &pagepool); - (void)z_erofs_collector_end(&f.clt); - - /* if some compressed cluster ready, need submit them anyway */ - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); - - if (err) - errln("%s, failed to read, err [%d]", __func__, err); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return err; -} - -static bool should_decompress_synchronously(struct erofs_sb_info *sbi, - unsigned int nr) -{ - return nr <= sbi->max_sync_decompress_pages; -} - -static int z_erofs_vle_normalaccess_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - struct inode *const inode = mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - bool sync = should_decompress_synchronously(sbi, nr_pages); - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); - struct page *head = NULL; - LIST_HEAD(pagepool); - - trace_erofs_readpages(mapping->host, lru_to_page(pages), - nr_pages, false); - - f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; - - for (; nr_pages; --nr_pages) { - struct page *page = lru_to_page(pages); - - prefetchw(&page->flags); - list_del(&page->lru); - - /* - * A pure asynchronous readahead is indicated if - * a PG_readahead marked page is hitted at first. - * Let's also do asynchronous decompression for this case. - */ - sync &= !(PageReadahead(page) && !head); - - if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { - list_add(&page->lru, &pagepool); - continue; - } - - set_page_private(page, (unsigned long)head); - head = page; - } - - while (head) { - struct page *page = head; - int err; - - /* traversal in reverse order */ - head = (void *)page_private(page); - - err = z_erofs_do_read_page(&f, page, &pagepool); - if (err) { - struct erofs_vnode *vi = EROFS_V(inode); - - errln("%s, readahead error at page %lu of nid %llu", - __func__, page->index, vi->nid); - } - put_page(page); - } - - (void)z_erofs_collector_end(&f.clt); - - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return 0; -} - -const struct address_space_operations z_erofs_vle_normalaccess_aops = { - .readpage = z_erofs_vle_normalaccess_readpage, - .readpages = z_erofs_vle_normalaccess_readpages, -}; - diff --git a/drivers/staging/erofs/zdata.h b/drivers/staging/erofs/zdata.h deleted file mode 100644 index e11fe1959ca2..000000000000 --- a/drivers/staging/erofs/zdata.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zdata.h - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZDATA_H -#define __EROFS_FS_ZDATA_H - -#include "internal.h" -#include "zpvec.h" - -#define Z_EROFS_NR_INLINE_PAGEVECS 3 - -/* - * Structure fields follow one of the following exclusion rules. - * - * I: Modifiable by initialization/destruction paths and read-only - * for everyone else; - * - * L: Field should be protected by pageset lock; - * - * A: Field should be accessed / updated in atomic for parallelized code. 
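- *
- * For example, `pageofs' below is only written while a collection is
- * being set up (rule I), while `vcnt' is always updated with `lock'
- * held (rule L).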
- */
-struct z_erofs_collection {
-	struct mutex lock;
-
-	/* I: page offset of start position of decompression */
-	unsigned short pageofs;
-
-	/* L: maximum relative page index in pagevec[] */
-	unsigned short nr_pages;
-
-	/* L: total number of pages in pagevec[] */
-	unsigned int vcnt;
-
-	union {
-		/* L: inline a certain number of pagevecs for bootstrap */
-		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
-
-		/* I: can be used to free the pcluster by RCU. */
-		struct rcu_head rcu;
-	};
-};
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
-
-/*
- * let's leave a type here in case another tagged pointer
- * is introduced later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
-struct z_erofs_pcluster {
-	struct erofs_workgroup obj;
-	struct z_erofs_collection primary_collection;
-
-	/* A: point to next chained pcluster or TAILs */
-	z_erofs_next_pcluster_t next;
-
-	/* A: compressed pages (including multi-usage pages) */
-	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
-
-	/* A: lower limit of decompressed length and whether it is full */
-	unsigned int length;
-
-	/* I: compression algorithm format */
-	unsigned char algorithmformat;
-	/* I: bit shift of physical cluster size */
-	unsigned char clusterbits;
-};
-
-#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
-
-/* let's avoid the valid 32-bit kernel addresses */
-
-/* the chained workgroup hasn't submitted I/O (still open) */
-#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
-/* the chained workgroup has already submitted I/O */
-#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)
-
-#define Z_EROFS_PCLUSTER_NIL            (NULL)
-
-#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
-
-struct z_erofs_unzip_io {
-	atomic_t pending_bios;
-	z_erofs_next_pcluster_t head;
-
-	union {
-		wait_queue_head_t wait;
-		struct work_struct work;
-	} u;
-};
-
-struct z_erofs_unzip_io_sb {
-	struct z_erofs_unzip_io io;
-	struct super_block *sb;
-};
-
-#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
-					 struct page *page)
-{
-	return page->mapping == MNGD_MAPPING(sbi);
-}
-
-#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
-/*
- * waiters (aka.
ongoing_packs): # to unlock the page - * sub-index: 0 - for partial page, >= 1 full page sub-index - */ -typedef atomic_t z_erofs_onlinepage_t; - -/* type punning */ -union z_erofs_onlinepage_converter { - z_erofs_onlinepage_t *o; - unsigned long *v; -}; - -static inline unsigned int z_erofs_onlinepage_index(struct page *page) -{ - union z_erofs_onlinepage_converter u; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; -} - -static inline void z_erofs_onlinepage_init(struct page *page) -{ - union { - z_erofs_onlinepage_t o; - unsigned long v; - /* keep from being unlocked in advance */ - } u = { .o = ATOMIC_INIT(1) }; - - set_page_private(page, u.v); - smp_wmb(); - SetPagePrivate(page); -} - -static inline void z_erofs_onlinepage_fixup(struct page *page, - uintptr_t index, bool down) -{ - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); - - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { - if (!index) - return; - - DBG_BUGON(id != index); - } - - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); - if (cmpxchg(p, o, v) != o) - goto repeat; -} - -static inline void z_erofs_onlinepage_endio(struct page *page) -{ - union z_erofs_onlinepage_converter u; - unsigned int v; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - v = atomic_dec_return(u.o); - if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { - ClearPagePrivate(page); - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); - } - debugln("%s, page %p value %x", __func__, page, atomic_read(u.o)); -} - -#define Z_EROFS_VMAP_ONSTACK_PAGES \ - min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U) -#define Z_EROFS_VMAP_GLOBAL_PAGES 2048 - -#endif - diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c deleted file mode 100644 index 774dacbc5b32..000000000000 --- a/drivers/staging/erofs/zmap.c +++ /dev/null @@ -1,468 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zmap.c - * - * Copyright (C) 2018-2019 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include -#include - -int z_erofs_fill_inode(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - - if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { - vi->z_advise = 0; - vi->z_algorithmtype[0] = 0; - vi->z_algorithmtype[1] = 0; - vi->z_logical_clusterbits = LOG_BLOCK_SIZE; - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits; - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits; - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); - } - - inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; - return 0; -} - -static int fill_inode_lazy(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - int err; - erofs_off_t pos; - struct page *page; - void *kaddr; - struct z_erofs_map_header *h; - - if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) - return 0; - - if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE)) - return -ERESTARTSYS; - - err = 0; - if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) - goto out_unlock; - - DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY); - - pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + - vi->xattr_isize, 8); - page = erofs_get_meta_page(sb, erofs_blknr(pos), false); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out_unlock; - } - - kaddr = kmap_atomic(page); - - h = kaddr + erofs_blkoff(pos); - vi->z_advise = le16_to_cpu(h->h_advise); - vi->z_algorithmtype[0] = h->h_algorithmtype & 15; - vi->z_algorithmtype[1] = h->h_algorithmtype >> 4; - - if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) { - errln("unknown compression format %u for nid %llu, please upgrade kernel", - vi->z_algorithmtype[0], vi->nid); - err = -EOPNOTSUPP; - goto unmap_done; - } - - vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 3) & 3); - - if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) { - errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel", - vi->z_physical_clusterbits[0], vi->nid); - err = -EOPNOTSUPP; - goto unmap_done; - } - - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 5) & 7); - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); -unmap_done: - kunmap_atomic(kaddr); - unlock_page(page); - put_page(page); -out_unlock: - clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags); - return err; -} - -struct z_erofs_maprecorder { - struct inode *inode; - struct erofs_map_blocks *map; - void *kaddr; - - unsigned long lcn; - /* compression extent information gathered */ - u8 type; - u16 clusterofs; - u16 delta[2]; - erofs_blk_t pblk; -}; - -static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, - erofs_blk_t eblk) -{ - struct super_block *const sb = m->inode->i_sb; - struct erofs_map_blocks *const map = m->map; - struct page *mpage = map->mpage; - - if (mpage) { - if (mpage->index == eblk) { - if (!m->kaddr) - m->kaddr = kmap_atomic(mpage); - return 0; - } - - if (m->kaddr) { - kunmap_atomic(m->kaddr); - m->kaddr = NULL; - } - put_page(mpage); - } - - mpage = erofs_get_meta_page(sb, eblk, false); - if (IS_ERR(mpage)) { - map->mpage = NULL; - return PTR_ERR(mpage); - } - m->kaddr = kmap_atomic(mpage); - unlock_page(mpage); - map->mpage = mpage; - return 0; -} - -static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) -{ - struct inode *const inode = 
m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); - const erofs_off_t pos = - Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + - vi->xattr_isize) + - lcn * sizeof(struct z_erofs_vle_decompressed_index); - struct z_erofs_vle_decompressed_index *di; - unsigned int advise, type; - int err; - - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - - m->lcn = lcn; - di = m->kaddr + erofs_blkoff(pos); - - advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); - switch (type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - m->clusterofs = 1 << vi->z_logical_clusterbits; - m->delta[0] = le16_to_cpu(di->di_u.delta[0]); - m->delta[1] = le16_to_cpu(di->di_u.delta[1]); - break; - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - m->clusterofs = le16_to_cpu(di->di_clusterofs); - m->pblk = le32_to_cpu(di->di_u.blkaddr); - break; - default: - DBG_BUGON(1); - return -EOPNOTSUPP; - } - m->type = type; - return 0; -} - -static unsigned int decode_compactedbits(unsigned int lobits, - unsigned int lomask, - u8 *in, unsigned int pos, u8 *type) -{ - const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); - const unsigned int lo = v & lomask; - - *type = (v >> lobits) & 3; - return lo; -} - -static int unpack_compacted_index(struct z_erofs_maprecorder *m, - unsigned int amortizedshift, - unsigned int eofs) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const unsigned int lomask = (1 << lclusterbits) - 1; - unsigned int vcnt, base, lo, encodebits, nblk; - int i; - u8 *in, type; - - if (1 << amortizedshift == 4) - vcnt = 2; - else if (1 << amortizedshift == 2 && lclusterbits == 12) - vcnt = 16; - else - return -EOPNOTSUPP; - - encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; - base = round_down(eofs, vcnt << amortizedshift); - in = m->kaddr + base; - - i = (eofs - base) >> amortizedshift; - - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - m->type = type; - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { - m->clusterofs = 1 << lclusterbits; - if (i + 1 != vcnt) { - m->delta[0] = lo; - return 0; - } - /* - * since the last lcluster in the pack is special, - * of which lo saves delta[1] rather than delta[0]. - * Hence, get delta[0] by the previous lcluster indirectly. 
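-		 * That is, delta[0] here becomes (lo of lcluster i - 1) + 1
-		 * if that lcluster is also NONHEAD, or just 1 if it is a
-		 * HEAD/PLAIN lcluster.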
- */ - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * (i - 1), &type); - if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - lo = 0; - m->delta[0] = lo + 1; - return 0; - } - m->clusterofs = lo; - m->delta[0] = 0; - /* figout out blkaddr (pblk) for HEAD lclusters */ - nblk = 1; - while (i > 0) { - --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - i -= lo; - - if (i >= 0) - ++nblk; - } - in += (vcnt << amortizedshift) - sizeof(__le32); - m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; - return 0; -} - -static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) -{ - struct inode *const inode = m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + - vi->inode_isize + vi->xattr_isize, 8) + - sizeof(struct z_erofs_map_header); - const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); - unsigned int compacted_4b_initial, compacted_2b; - unsigned int amortizedshift; - erofs_off_t pos; - int err; - - if (lclusterbits != 12) - return -EOPNOTSUPP; - - if (lcn >= totalidx) - return -EINVAL; - - m->lcn = lcn; - /* used to align to 32-byte (compacted_2b) alignment */ - compacted_4b_initial = (32 - ebase % 32) / 4; - if (compacted_4b_initial == 32 / 4) - compacted_4b_initial = 0; - - if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) - compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); - else - compacted_2b = 0; - - pos = ebase; - if (lcn < compacted_4b_initial) { - amortizedshift = 2; - goto out; - } - pos += compacted_4b_initial * 4; - lcn -= compacted_4b_initial; - - if (lcn < compacted_2b) { - amortizedshift = 1; - goto out; - } - pos += compacted_2b * 2; - lcn -= compacted_2b; - amortizedshift = 2; -out: - pos += lcn * (1 << amortizedshift); - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); -} - -static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned int lcn) -{ - const unsigned int datamode = EROFS_V(m->inode)->datamode; - - if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) - return vle_legacy_load_cluster_from_disk(m, lcn); - - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return compacted_load_cluster_from_disk(m, lcn); - - return -EINVAL; -} - -static int vle_extent_lookback(struct z_erofs_maprecorder *m, - unsigned int lookback_distance) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - struct erofs_map_blocks *const map = m->map; - const unsigned int lclusterbits = vi->z_logical_clusterbits; - unsigned long lcn = m->lcn; - int err; - - if (lcn < lookback_distance) { - errln("bogus lookback distance @ nid %llu", vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* load extent head logical cluster if needed */ - lcn -= lookback_distance; - err = vle_load_cluster_from_disk(m, lcn); - if (err) - return err; - - switch (m->type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - if (unlikely(!m->delta[0])) { - errln("invalid lookback distance 0 at nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - return vle_extent_lookback(m, m->delta[0]); - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - map->m_la = (lcn << lclusterbits) | m->clusterofs; - break; - default: - 
errln("unknown type %u at lcn %lu of nid %llu", - m->type, lcn, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - return 0; -} - -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct z_erofs_maprecorder m = { - .inode = inode, - .map = map, - }; - int err = 0; - unsigned int lclusterbits, endoff; - unsigned long long ofs, end; - - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); - - /* when trying to read beyond EOF, leave it unmapped */ - if (unlikely(map->m_la >= inode->i_size)) { - map->m_llen = map->m_la + 1 - inode->i_size; - map->m_la = inode->i_size; - map->m_flags = 0; - goto out; - } - - err = fill_inode_lazy(inode); - if (err) - goto out; - - lclusterbits = vi->z_logical_clusterbits; - ofs = map->m_la; - m.lcn = ofs >> lclusterbits; - endoff = ofs & ((1 << lclusterbits) - 1); - - err = vle_load_cluster_from_disk(&m, m.lcn); - if (err) - goto unmap_out; - - map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ - end = (m.lcn + 1ULL) << lclusterbits; - - switch (m.type) { - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - if (endoff >= m.clusterofs) - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - if (endoff >= m.clusterofs) { - map->m_la = (m.lcn << lclusterbits) | m.clusterofs; - break; - } - /* m.lcn should be >= 1 if endoff < m.clusterofs */ - if (unlikely(!m.lcn)) { - errln("invalid logical cluster 0 at nid %llu", - vi->nid); - err = -EFSCORRUPTED; - goto unmap_out; - } - end = (m.lcn << lclusterbits) | m.clusterofs; - map->m_flags |= EROFS_MAP_FULL_MAPPED; - m.delta[0] = 1; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - /* get the correspoinding first chunk */ - err = vle_extent_lookback(&m, m.delta[0]); - if (unlikely(err)) - goto unmap_out; - break; - default: - errln("unknown type %u at offset %llu of nid %llu", - m.type, ofs, vi->nid); - err = -EOPNOTSUPP; - goto unmap_out; - } - - map->m_llen = end - map->m_la; - map->m_plen = 1 << lclusterbits; - map->m_pa = blknr_to_addr(m.pblk); - map->m_flags |= EROFS_MAP_MAPPED; - -unmap_out: - if (m.kaddr) - kunmap_atomic(m.kaddr); - -out: - debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", - __func__, map->m_la, map->m_pa, - map->m_llen, map->m_plen, map->m_flags); - - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); - - /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ - DBG_BUGON(err < 0 && err != -ENOMEM); - return err; -} - diff --git a/drivers/staging/erofs/zpvec.h b/drivers/staging/erofs/zpvec.h deleted file mode 100644 index 9798f5627786..000000000000 --- a/drivers/staging/erofs/zpvec.h +++ /dev/null @@ -1,159 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zpvec.h - * - * Copyright (C) 2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZPVEC_H -#define __EROFS_FS_ZPVEC_H - -#include "tagptr.h" - -/* page type in pagevec for decompress subsystem */ -enum z_erofs_page_type { - /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ - Z_EROFS_PAGE_TYPE_EXCLUSIVE, - - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, - - Z_EROFS_VLE_PAGE_TYPE_HEAD, - Z_EROFS_VLE_PAGE_TYPE_MAX -}; - -extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") - __bad_page_type_exclusive(void); - -/* pagevec tagged pointer */ -typedef tagptr2_t erofs_vtptr_t; - -/* pagevec collector */ -struct z_erofs_pagevec_ctor { - struct page *curr, *next; - erofs_vtptr_t *pages; - - unsigned int nr, index; -}; - -static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - if (!ctor->curr) - return; - - if (atomic) - kunmap_atomic(ctor->pages); - else - kunmap(ctor->curr); -} - -static inline struct page * -z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr) -{ - unsigned int index; - - /* keep away from occupied pages */ - if (ctor->next) - return ctor->next; - - for (index = 0; index < nr; ++index) { - const erofs_vtptr_t t = ctor->pages[index]; - const unsigned int tags = tagptr_unfold_tags(t); - - if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) - return tagptr_unfold_ptr(t); - } - DBG_BUGON(nr >= ctor->nr); - return NULL; -} - -static inline void -z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); - - z_erofs_pagevec_ctor_exit(ctor, atomic); - - ctor->curr = next; - ctor->next = NULL; - ctor->pages = atomic ? - kmap_atomic(ctor->curr) : kmap(ctor->curr); - - ctor->nr = PAGE_SIZE / sizeof(struct page *); - ctor->index = 0; -} - -static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr, - erofs_vtptr_t *pages, - unsigned int i) -{ - ctor->nr = nr; - ctor->curr = ctor->next = NULL; - ctor->pages = pages; - - if (i >= nr) { - i -= nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - while (i > ctor->nr) { - i -= ctor->nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - } - } - ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); - ctor->index = i; -} - -static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, - struct page *page, - enum z_erofs_page_type type, - bool *occupied) -{ - *occupied = false; - if (unlikely(!ctor->next && type)) - if (ctor->index + 1 == ctor->nr) - return false; - - if (unlikely(ctor->index >= ctor->nr)) - z_erofs_pagevec_ctor_pagedown(ctor, false); - - /* exclusive page type must be 0 */ - if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) - __bad_page_type_exclusive(); - - /* should remind that collector->next never equal to 1, 2 */ - if (type == (uintptr_t)ctor->next) { - ctor->next = page; - *occupied = true; - } - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); - return true; -} - -static inline struct page * -z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, - enum z_erofs_page_type *type) -{ - erofs_vtptr_t t; - - if (unlikely(ctor->index >= ctor->nr)) { - DBG_BUGON(!ctor->next); - z_erofs_pagevec_ctor_pagedown(ctor, true); - } - - t = ctor->pages[ctor->index]; - - *type = tagptr_unfold_tags(t); - - /* should remind that collector->next never equal to 1, 2 */ - if (*type == (uintptr_t)ctor->next) - ctor->next = tagptr_unfold_ptr(t); - - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); - 
return tagptr_unfold_ptr(t); -} -#endif - diff --git a/fs/Kconfig b/fs/Kconfig index bfb1c6095c7a..669d46550e6d 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -261,6 +261,7 @@ source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" source "fs/ufs/Kconfig" +source "fs/erofs/Kconfig" endif # MISC_FILESYSTEMS diff --git a/fs/Makefile b/fs/Makefile index d60089fd689b..b2e4973a0bea 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -130,3 +130,4 @@ obj-$(CONFIG_F2FS_FS) += f2fs/ obj-$(CONFIG_CEPH_FS) += ceph/ obj-$(CONFIG_PSTORE) += pstore/ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ +obj-$(CONFIG_EROFS_FS) += erofs/ diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig new file mode 100644 index 000000000000..16316d1adca3 --- /dev/null +++ b/fs/erofs/Kconfig @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config EROFS_FS + tristate "EROFS filesystem support" + depends on BLOCK + help + EROFS (Enhanced Read-Only File System) is a lightweight + read-only file system with modern designs (eg. page-sized + blocks, inline xattrs/data, etc.) for scenarios which need + high-performance read-only requirements, e.g. Android OS + for mobile phones and LIVECDs. + + It also provides fixed-sized output compression support, + which improves storage density, keeps relatively higher + compression ratios, which is more useful to achieve high + performance for embedded devices with limited memory. + + If unsure, say N. + +config EROFS_FS_DEBUG + bool "EROFS debugging feature" + depends on EROFS_FS + help + Print debugging messages and enable more BUG_ONs which check + filesystem consistency and find potential issues aggressively, + which can be used for Android eng build, for example. + + For daily use, say N. + +config EROFS_FAULT_INJECTION + bool "EROFS fault injection facility" + depends on EROFS_FS + help + Test EROFS to inject faults such as ENOMEM, EIO, and so on. + If unsure, say N. + +config EROFS_FS_XATTR + bool "EROFS extended attributes" + depends on EROFS_FS + default y + help + Extended attributes are name:value pairs associated with inodes by + the kernel or by users (see the attr(5) manual page, or visit + for details). + + If unsure, say N. + +config EROFS_FS_POSIX_ACL + bool "EROFS Access Control Lists" + depends on EROFS_FS_XATTR + select FS_POSIX_ACL + default y + help + Posix Access Control Lists (ACLs) support permissions for users and + groups beyond the owner/group/world scheme. + + To learn more about Access Control Lists, visit the POSIX ACLs for + Linux website . + + If you don't know what Access Control Lists are, say N. + +config EROFS_FS_SECURITY + bool "EROFS Security Labels" + depends on EROFS_FS_XATTR + default y + help + Security labels provide an access control facility to support Linux + Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO + Linux. This option enables an extended attribute handler for file + security labels in the erofs filesystem, so that it requires enabling + the extended attribute support in advance. + + If you are not using a security module, say N. + +config EROFS_FS_ZIP + bool "EROFS Data Compression Support" + depends on EROFS_FS + select LZ4_DECOMPRESS + default y + help + Enable fixed-sized output compression for EROFS. + + If you don't want to enable compression feature, say N. + +config EROFS_FS_CLUSTER_PAGE_LIMIT + int "EROFS Cluster Pages Hard Limit" + depends on EROFS_FS_ZIP + range 1 256 + default "1" + help + Indicates maximum # of pages of a compressed + physical cluster. 
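+
+	  (In the source this limit corresponds to
+	  Z_EROFS_CLUSTER_MAX_PAGES, which sizes the per-pcluster
+	  compressed_pages[] array.)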
+
+	  For example, if files in an image were compressed
+	  into 8k units, the hard limit should not be configured
+	  to less than 2. Otherwise, the image will fail to
+	  mount on this kernel.
+
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
new file mode 100644
index 000000000000..46f2aa4ba46c
--- /dev/null
+++ b/fs/erofs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+EROFS_VERSION = "1.0"
+
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
new file mode 100644
index 000000000000..07d279fd5d67
--- /dev/null
+++ b/fs/erofs/compress.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+#include "internal.h"
+
+enum {
+	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+	Z_EROFS_COMPRESSION_RUNTIME_MAX
+};
+
+struct z_erofs_decompress_req {
+	struct super_block *sb;
+	struct page **in, **out;
+
+	unsigned short pageofs_out;
+	unsigned int inputsize, outputsize;
+
+	/* indicates the algorithm to be used for decompression */
+	unsigned int alg;
+	bool inplace_io, partial_decoding;
+};
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used to mark temporarily allocated pages, as distinct from
+ * file/cached pages and NULL mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING         ((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+	return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+					   struct page *page)
+{
+	if (!z_erofs_page_is_staging(page))
+		return false;
+
+	/* staging pages should not be used by others at the same time */
+	if (page_ref_count(page) > 1)
+		put_page(page);
+	else
+		list_add(&page->lru, pagepool);
+	return true;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+		       struct list_head *pagepool);
+
+#endif
+
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
new file mode 100644
index 000000000000..fda16ec8863e
--- /dev/null
+++ b/fs/erofs/data.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include + +#include + +static inline void read_endio(struct bio *bio) +{ + struct super_block *const sb = bio->bi_private; + struct bio_vec *bvec; + blk_status_t err = bio->bi_status; + struct bvec_iter_all iter_all; + + if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) { + erofs_show_injection_info(FAULT_READ_IO); + err = BLK_STS_IOERR; + } + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + /* page is already locked */ + DBG_BUGON(PageUptodate(page)); + + if (unlikely(err)) + SetPageError(page); + else + SetPageUptodate(page); + + unlock_page(page); + /* page could be reclaimed now */ + } + bio_put(bio); +} + +/* prio -- true is used for dir */ +struct page *__erofs_get_meta_page(struct super_block *sb, + erofs_blk_t blkaddr, bool prio, bool nofail) +{ + struct inode *const bd_inode = sb->s_bdev->bd_inode; + struct address_space *const mapping = bd_inode->i_mapping; + /* prefer retrying in the allocator to blindly looping below */ + const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) | + (nofail ? __GFP_NOFAIL : 0); + unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0; + struct page *page; + int err; + +repeat: + page = find_or_create_page(mapping, blkaddr, gfp); + if (unlikely(!page)) { + DBG_BUGON(nofail); + return ERR_PTR(-ENOMEM); + } + DBG_BUGON(!PageLocked(page)); + + if (!PageUptodate(page)) { + struct bio *bio; + + bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail); + if (IS_ERR(bio)) { + DBG_BUGON(nofail); + err = PTR_ERR(bio); + goto err_out; + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + if (unlikely(err != PAGE_SIZE)) { + err = -EFAULT; + goto err_out; + } + + __submit_bio(bio, REQ_OP_READ, + REQ_META | (prio ? REQ_PRIO : 0)); + + lock_page(page); + + /* this page has been truncated by others */ + if (unlikely(page->mapping != mapping)) { +unlock_repeat: + unlock_page(page); + put_page(page); + goto repeat; + } + + /* more likely a read error */ + if (unlikely(!PageUptodate(page))) { + if (io_retries) { + --io_retries; + goto unlock_repeat; + } + err = -EIO; + goto err_out; + } + } + return page; + +err_out: + unlock_page(page); + put_page(page); + return ERR_PTR(err); +} + +static int erofs_map_blocks_flatmode(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + int err = 0; + erofs_blk_t nblocks, lastblk; + u64 offset = map->m_la; + struct erofs_vnode *vi = EROFS_V(inode); + + trace_erofs_map_blocks_flatmode_enter(inode, map, flags); + + nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); + lastblk = nblocks - is_inode_flat_inline(inode); + + if (unlikely(offset >= inode->i_size)) { + /* leave out-of-bound access unmapped */ + map->m_flags = 0; + map->m_plen = 0; + goto out; + } + + /* there is no hole in flatmode */ + map->m_flags = EROFS_MAP_MAPPED; + + if (offset < blknr_to_addr(lastblk)) { + map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; + map->m_plen = blknr_to_addr(lastblk) - offset; + } else if (is_inode_flat_inline(inode)) { + /* 2 - inode inline B: inode, [xattrs], inline last blk... 
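+		 *
+		 * i.e. the last (partial) block of a flat-inline file is
+		 * stored right after the on-disk inode and xattrs, which
+		 * is why m_pa below is derived from iloc() plus the inode
+		 * and xattr sizes.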
*/ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + + vi->xattr_isize + erofs_blkoff(map->m_la); + map->m_plen = inode->i_size - offset; + + /* inline data should be located in one meta block */ + if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + err = -EFSCORRUPTED; + goto err_out; + } + + map->m_flags |= EROFS_MAP_META; + } else { + errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", + vi->nid, inode->i_size, map->m_la); + DBG_BUGON(1); + err = -EIO; + goto err_out; + } + +out: + map->m_llen = map->m_plen; + +err_out: + trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); + return err; +} + +int erofs_map_blocks(struct inode *inode, + struct erofs_map_blocks *map, int flags) +{ + if (unlikely(is_inode_layout_compression(inode))) { + int err = z_erofs_map_blocks_iter(inode, map, flags); + + if (map->mpage) { + put_page(map->mpage); + map->mpage = NULL; + } + return err; + } + return erofs_map_blocks_flatmode(inode, map, flags); +} + +static inline struct bio *erofs_read_raw_page(struct bio *bio, + struct address_space *mapping, + struct page *page, + erofs_off_t *last_block, + unsigned int nblocks, + bool ra) +{ + struct inode *const inode = mapping->host; + struct super_block *const sb = inode->i_sb; + erofs_off_t current_block = (erofs_off_t)page->index; + int err; + + DBG_BUGON(!nblocks); + + if (PageUptodate(page)) { + err = 0; + goto has_updated; + } + + /* note that for readpage case, bio also equals to NULL */ + if (bio && + /* not continuous */ + *last_block + 1 != current_block) { +submit_bio_retry: + __submit_bio(bio, REQ_OP_READ, 0); + bio = NULL; + } + + if (!bio) { + struct erofs_map_blocks map = { + .m_la = blknr_to_addr(current_block), + }; + erofs_blk_t blknr; + unsigned int blkoff; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (unlikely(err)) + goto err_out; + + /* zero out the holed page */ + if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, 0, PAGE_SIZE); + SetPageUptodate(page); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* for RAW access mode, m_plen must be equal to m_llen */ + DBG_BUGON(map.m_plen != map.m_llen); + + blknr = erofs_blknr(map.m_pa); + blkoff = erofs_blkoff(map.m_pa); + + /* deal with inline page */ + if (map.m_flags & EROFS_MAP_META) { + void *vsrc, *vto; + struct page *ipage; + + DBG_BUGON(map.m_plen > PAGE_SIZE); + + ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); + + if (IS_ERR(ipage)) { + err = PTR_ERR(ipage); + goto err_out; + } + + vsrc = kmap_atomic(ipage); + vto = kmap_atomic(page); + memcpy(vto, vsrc + blkoff, map.m_plen); + memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); + kunmap_atomic(vto); + kunmap_atomic(vsrc); + flush_dcache_page(page); + + SetPageUptodate(page); + /* TODO: could we unlock the page earlier? 
*/ + unlock_page(ipage); + put_page(ipage); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* pa must be block-aligned for raw reading */ + DBG_BUGON(erofs_blkoff(map.m_pa)); + + /* max # of continuous pages */ + if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) + nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); + if (nblocks > BIO_MAX_PAGES) + nblocks = BIO_MAX_PAGES; + + bio = erofs_grab_bio(sb, blknr, nblocks, sb, + read_endio, false); + if (IS_ERR(bio)) { + err = PTR_ERR(bio); + bio = NULL; + goto err_out; + } + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + /* out of the extent or bio is full */ + if (err < PAGE_SIZE) + goto submit_bio_retry; + + *last_block = current_block; + + /* shift in advance in case of it followed by too many gaps */ + if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { + /* err should reassign to 0 after submitting */ + err = 0; + goto submit_bio_out; + } + + return bio; + +err_out: + /* for sync reading, set page error immediately */ + if (!ra) { + SetPageError(page); + ClearPageUptodate(page); + } +has_updated: + unlock_page(page); + + /* if updated manually, continuous pages has a gap */ + if (bio) +submit_bio_out: + __submit_bio(bio, REQ_OP_READ, 0); + + return unlikely(err) ? ERR_PTR(err) : NULL; +} + +/* + * since we dont have write or truncate flows, so no inode + * locking needs to be held at the moment. + */ +static int erofs_raw_access_readpage(struct file *file, struct page *page) +{ + erofs_off_t last_block; + struct bio *bio; + + trace_erofs_readpage(page, true); + + bio = erofs_read_raw_page(NULL, page->mapping, + page, &last_block, 1, false); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ + return 0; +} + +static int erofs_raw_access_readpages(struct file *filp, + struct address_space *mapping, + struct list_head *pages, + unsigned int nr_pages) +{ + erofs_off_t last_block; + struct bio *bio = NULL; + gfp_t gfp = readahead_gfp_mask(mapping); + struct page *page = list_last_entry(pages, struct page, lru); + + trace_erofs_readpages(mapping->host, page, nr_pages, true); + + for (; nr_pages; --nr_pages) { + page = list_entry(pages->prev, struct page, lru); + + prefetchw(&page->flags); + list_del(&page->lru); + + if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { + bio = erofs_read_raw_page(bio, mapping, page, + &last_block, nr_pages, true); + + /* all the page errors are ignored when readahead */ + if (IS_ERR(bio)) { + pr_err("%s, readahead error at page %lu of nid %llu\n", + __func__, page->index, + EROFS_V(mapping->host)->nid); + + bio = NULL; + } + } + + /* pages could still be locked */ + put_page(page); + } + DBG_BUGON(!list_empty(pages)); + + /* the rare case (end in gaps) */ + if (unlikely(bio)) + __submit_bio(bio, REQ_OP_READ, 0); + return 0; +} + +static int erofs_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + struct erofs_map_blocks map = { + .m_la = iblock << 9, + }; + int err; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (err) + return err; + + if (map.m_flags & EROFS_MAP_MAPPED) + bh->b_blocknr = erofs_blknr(map.m_pa); + + return err; +} + +static sector_t erofs_bmap(struct address_space *mapping, sector_t block) +{ + struct inode *inode = mapping->host; + + if (is_inode_flat_inline(inode)) { + erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; + + if (block >> LOG_SECTORS_PER_BLOCK >= blks) + return 0; + } + + return generic_block_bmap(mapping, 
block, erofs_get_block);
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+	.readpage = erofs_raw_access_readpage,
+	.readpages = erofs_raw_access_readpages,
+	.bmap = erofs_bmap,
+};
+
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
new file mode 100644
index 000000000000..5f4b7f302863
--- /dev/null
+++ b/fs/erofs/decompressor.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "compress.h"
+#include <linux/module.h>
+#include <linux/lz4.h>
+
+#ifndef LZ4_DISTANCE_MAX	/* history window size */
+#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
+#endif
+
+#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
+#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
+#endif
+
+struct z_erofs_decompressor {
+	/*
+	 * if destpages has sparse (absent) pages, fill them with bounce
+	 * pages. It also checks whether destpages indicate continuous
+	 * physical memory.
+	 */
+	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool);
+	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
+	char *name;
+};
+
+static bool use_vmap;
+module_param(use_vmap, bool, 0444);
+MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
+
+static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool)
+{
+	const unsigned int nr =
+		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
+					   BITS_PER_LONG)] = { 0 };
+	void *kaddr = NULL;
+	unsigned int i, j, top;
+
+	top = 0;
+	for (i = j = 0; i < nr; ++i, ++j) {
+		struct page *const page = rq->out[i];
+		struct page *victim;
+
+		if (j >= LZ4_MAX_DISTANCE_PAGES)
+			j = 0;
+
+		/* 'valid' bounced can only be tested after a complete round */
+		if (test_bit(j, bounced)) {
+			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
+			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
+			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
+		}
+
+		if (page) {
+			__clear_bit(j, bounced);
+			if (kaddr) {
+				if (kaddr + PAGE_SIZE == page_address(page))
+					kaddr += PAGE_SIZE;
+				else
+					kaddr = NULL;
+			} else if (!i) {
+				kaddr = page_address(page);
+			}
+			continue;
+		}
+		kaddr = NULL;
+		__set_bit(j, bounced);
+
+		if (top) {
+			victim = availables[--top];
+			get_page(victim);
+		} else {
+			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+			if (unlikely(!victim))
+				return -ENOMEM;
+			victim->mapping = Z_EROFS_MAPPING_STAGING;
+		}
+		rq->out[i] = victim;
+	}
+	return kaddr ? 1 : 0;
+}
+
+static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
+				       u8 *src, unsigned int pageofs_in)
+{
+	/*
+	 * if in-place decompression is ongoing, those decompressed
+	 * pages should be copied in order to avoid being overlapped.
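
The comment above (it closes just after this sketch) captures the in-place I/O hazard that lz4_decompress() guards against with LZ4_DECOMPRESS_INPLACE_MARGIN(). A self-contained userspace model, with made-up sizes and a toy 2x "decompressor": once the output cursor reaches not-yet-consumed input sharing the same buffer, the stream corrupts itself, and copying the input aside first (as generic_copy_inplace_data() does into a per-CPU buffer) avoids it:

	#include <stdio.h>
	#include <string.h>

	#define IN_OFS	16	/* compressed data offset inside the "page" */
	#define IN_LEN	24

	/* expand each byte to two bytes, like a toy decompressor */
	static void expand(char *out, const char *in, int len)
	{
		for (int i = 0; i < len; i++) {
			out[2 * i] = in[i];
			out[2 * i + 1] = in[i];
		}
	}

	int main(void)
	{
		char page[64], safe[64], tmp[IN_LEN];

		memset(page, '.', sizeof(page));
		memcpy(page + IN_OFS, "ABCDEFGHIJKLMNOPQRSTUVWX", IN_LEN);
		memcpy(safe, page, sizeof(page));

		/* in-place: the write cursor clobbers unread input at i == 16 */
		expand(page, page + IN_OFS, IN_LEN);

		/* copy the input aside first, as generic_copy_inplace_data() does */
		memcpy(tmp, safe + IN_OFS, IN_LEN);
		expand(safe, tmp, IN_LEN);

		printf("in-place: %.48s\n", page);	/* tail is corrupted */
		printf("copied:   %.48s\n", safe);	/* AABBCC... as expected */
		return 0;
	}

Here IN_OFS deliberately leaves too little room between input and output; having enough slack at the tail is exactly what the margin check verifies before in-place decompression is attempted.
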
+ */ + struct page **in = rq->in; + u8 *const tmp = erofs_get_pcpubuf(0); + u8 *tmpp = tmp; + unsigned int inlen = rq->inputsize - pageofs_in; + unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); + + while (tmpp < tmp + inlen) { + if (!src) + src = kmap_atomic(*in); + memcpy(tmpp, src + pageofs_in, count); + kunmap_atomic(src); + src = NULL; + tmpp += count; + pageofs_in = 0; + count = PAGE_SIZE; + ++in; + } + return tmp; +} + +static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) +{ + unsigned int inputmargin, inlen; + u8 *src; + bool copied, support_0padding; + int ret; + + if (rq->inputsize > PAGE_SIZE) + return -EOPNOTSUPP; + + src = kmap_atomic(*rq->in); + inputmargin = 0; + support_0padding = false; + + /* decompression inplace is only safe when 0padding is enabled */ + if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { + support_0padding = true; + + while (!src[inputmargin & ~PAGE_MASK]) + if (!(++inputmargin & ~PAGE_MASK)) + break; + + if (inputmargin >= rq->inputsize) { + kunmap_atomic(src); + return -EIO; + } + } + + copied = false; + inlen = rq->inputsize - inputmargin; + if (rq->inplace_io) { + const uint oend = (rq->pageofs_out + + rq->outputsize) & ~PAGE_MASK; + const uint nr = PAGE_ALIGN(rq->pageofs_out + + rq->outputsize) >> PAGE_SHIFT; + + if (rq->partial_decoding || !support_0padding || + rq->out[nr - 1] != rq->in[0] || + rq->inputsize - oend < + LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { + src = generic_copy_inplace_data(rq, src, inputmargin); + inputmargin = 0; + copied = true; + } + } + + ret = LZ4_decompress_safe_partial(src + inputmargin, out, + inlen, rq->outputsize, + rq->outputsize); + if (ret < 0) { + errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", + __func__, src + inputmargin, inlen, inputmargin, + out, rq->outputsize); + WARN_ON(1); + print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, + 16, 1, src + inputmargin, inlen, true); + print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, + 16, 1, out, rq->outputsize, true); + ret = -EIO; + } + + if (copied) + erofs_put_pcpubuf(src); + else + kunmap_atomic(src); + return ret; +} + +static struct z_erofs_decompressor decompressors[] = { + [Z_EROFS_COMPRESSION_SHIFTED] = { + .name = "shifted" + }, + [Z_EROFS_COMPRESSION_LZ4] = { + .prepare_destpages = lz4_prepare_destpages, + .decompress = lz4_decompress, + .name = "lz4" + }, +}; + +static void copy_from_pcpubuf(struct page **out, const char *dst, + unsigned short pageofs_out, + unsigned int outputsize) +{ + const char *end = dst + outputsize; + const unsigned int righthalf = PAGE_SIZE - pageofs_out; + const char *cur = dst - pageofs_out; + + while (cur < end) { + struct page *const page = *out++; + + if (page) { + char *buf = kmap_atomic(page); + + if (cur >= dst) { + memcpy(buf, cur, min_t(uint, PAGE_SIZE, + end - cur)); + } else { + memcpy(buf + pageofs_out, cur + pageofs_out, + min_t(uint, righthalf, end - cur)); + } + kunmap_atomic(buf); + } + cur += PAGE_SIZE; + } +} + +static void *erofs_vmap(struct page **pages, unsigned int count) +{ + int i = 0; + + if (use_vmap) + return vmap(pages, count, VM_MAP, PAGE_KERNEL); + + while (1) { + void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); + + /* retry two more times (totally 3 times) */ + if (addr || ++i >= 3) + return addr; + vm_unmap_aliases(); + } + return NULL; +} + +static void erofs_vunmap(const void *mem, unsigned int count) +{ + if (!use_vmap) + vm_unmap_ram(mem, count); + else + vunmap(mem); +} + +static int decompress_generic(struct 
z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const struct z_erofs_decompressor *alg = decompressors + rq->alg; + unsigned int dst_maptype; + void *dst; + int ret; + + if (nrpages_out == 1 && !rq->inplace_io) { + DBG_BUGON(!*rq->out); + dst = kmap_atomic(*rq->out); + dst_maptype = 0; + goto dstmap_out; + } + + /* + * For the case of small output size (especially much less + * than PAGE_SIZE), memcpy the decompressed data rather than + * compressed data is preferred. + */ + if (rq->outputsize <= PAGE_SIZE * 7 / 8) { + dst = erofs_get_pcpubuf(0); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + rq->inplace_io = false; + ret = alg->decompress(rq, dst); + if (!ret) + copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, + rq->outputsize); + + erofs_put_pcpubuf(dst); + return ret; + } + + ret = alg->prepare_destpages(rq, pagepool); + if (ret < 0) { + return ret; + } else if (ret) { + dst = page_address(*rq->out); + dst_maptype = 1; + goto dstmap_out; + } + + dst = erofs_vmap(rq->out, nrpages_out); + if (!dst) + return -ENOMEM; + dst_maptype = 2; + +dstmap_out: + ret = alg->decompress(rq, dst + rq->pageofs_out); + + if (!dst_maptype) + kunmap_atomic(dst); + else if (dst_maptype == 2) + erofs_vunmap(dst, nrpages_out); + return ret; +} + +static int shifted_decompress(const struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; + unsigned char *src, *dst; + + if (nrpages_out > 2) { + DBG_BUGON(1); + return -EIO; + } + + if (rq->out[0] == *rq->in) { + DBG_BUGON(nrpages_out != 1); + return 0; + } + + src = kmap_atomic(*rq->in); + if (!rq->out[0]) { + dst = NULL; + } else { + dst = kmap_atomic(rq->out[0]); + memcpy(dst + rq->pageofs_out, src, righthalf); + } + + if (rq->out[1] == *rq->in) { + memmove(src, src + righthalf, rq->pageofs_out); + } else if (nrpages_out == 2) { + if (dst) + kunmap_atomic(dst); + DBG_BUGON(!rq->out[1]); + dst = kmap_atomic(rq->out[1]); + memcpy(dst, src + righthalf, rq->pageofs_out); + } + if (dst) + kunmap_atomic(dst); + kunmap_atomic(src); + return 0; +} + +int z_erofs_decompress(struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) + return shifted_decompress(rq, pagepool); + return decompress_generic(rq, pagepool); +} + diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c new file mode 100644 index 000000000000..1976e60e5174 --- /dev/null +++ b/fs/erofs/dir.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
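
decompress_generic() above chooses among three ways of reaching the destination pages; restated as a tiny decision helper (a sketch: PAGE_SIZE fixed at 4K, names hypothetical, and the physically-contiguous fast path that prepare_destpages() can report is omitted):

	#include <stdbool.h>

	#define PAGE_SIZE 4096u

	enum dst_maptype { DST_KMAP, DST_PCPUBUF, DST_VMAP };

	/* mirrors the selection order in decompress_generic() */
	static enum dst_maptype pick_dst(unsigned int nrpages_out,
					 unsigned int outputsize,
					 bool inplace_io)
	{
		if (nrpages_out == 1 && !inplace_io)
			return DST_KMAP;	/* single page: kmap_atomic() it */
		if (outputsize <= PAGE_SIZE * 7 / 8)
			return DST_PCPUBUF;	/* small output: per-CPU buffer, then copy out */
		return DST_VMAP;		/* otherwise map all pages contiguously */
	}

The 7/8-of-a-page threshold trades one extra memcpy() for skipping virtual-mapping setup, which presumably dominates the cost for small outputs.
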
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" + +static void debug_one_dentry(unsigned char d_type, const char *de_name, + unsigned int de_namelen) +{ +#ifdef CONFIG_EROFS_FS_DEBUG + /* since the on-disk name could not have the trailing '\0' */ + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; + + memcpy(dbg_namebuf, de_name, de_namelen); + dbg_namebuf[de_namelen] = '\0'; + + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, + de_namelen, d_type); +#endif +} + +static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, + void *dentry_blk, unsigned int *ofs, + unsigned int nameoff, unsigned int maxsize) +{ + struct erofs_dirent *de = dentry_blk + *ofs; + const struct erofs_dirent *end = dentry_blk + nameoff; + + while (de < end) { + const char *de_name; + unsigned int de_namelen; + unsigned char d_type; + + d_type = fs_ftype_to_dtype(de->file_type); + + nameoff = le16_to_cpu(de->nameoff); + de_name = (char *)dentry_blk + nameoff; + + /* the last dirent in the block? */ + if (de + 1 >= end) + de_namelen = strnlen(de_name, maxsize - nameoff); + else + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; + + /* a corrupted entry is found */ + if (unlikely(nameoff + de_namelen > maxsize || + de_namelen > EROFS_NAME_LEN)) { + errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + debug_one_dentry(d_type, de_name, de_namelen); + if (!dir_emit(ctx, de_name, de_namelen, + le64_to_cpu(de->nid), d_type)) + /* stopped by some reason */ + return 1; + ++de; + *ofs += sizeof(struct erofs_dirent); + } + *ofs = maxsize; + return 0; +} + +static int erofs_readdir(struct file *f, struct dir_context *ctx) +{ + struct inode *dir = file_inode(f); + struct address_space *mapping = dir->i_mapping; + const size_t dirsize = i_size_read(dir); + unsigned int i = ctx->pos / EROFS_BLKSIZ; + unsigned int ofs = ctx->pos % EROFS_BLKSIZ; + int err = 0; + bool initial = true; + + while (ctx->pos < dirsize) { + struct page *dentry_page; + struct erofs_dirent *de; + unsigned int nameoff, maxsize; + + dentry_page = read_mapping_page(mapping, i, NULL); + if (dentry_page == ERR_PTR(-ENOMEM)) { + err = -ENOMEM; + break; + } else if (IS_ERR(dentry_page)) { + errln("fail to readdir of logical block %u of nid %llu", + i, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + break; + } + + de = (struct erofs_dirent *)kmap(dentry_page); + + nameoff = le16_to_cpu(de->nameoff); + + if (unlikely(nameoff < sizeof(struct erofs_dirent) || + nameoff >= PAGE_SIZE)) { + errln("%s, invalid de[0].nameoff %u @ nid %llu", + __func__, nameoff, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + goto skip_this; + } + + maxsize = min_t(unsigned int, + dirsize - ctx->pos + ofs, PAGE_SIZE); + + /* search dirents at the arbitrary position */ + if (unlikely(initial)) { + initial = false; + + ofs = roundup(ofs, sizeof(struct erofs_dirent)); + if (unlikely(ofs >= nameoff)) + goto skip_this; + } + + err = erofs_fill_dentries(dir, ctx, de, &ofs, + nameoff, maxsize); +skip_this: + kunmap(dentry_page); + + put_page(dentry_page); + + ctx->pos = blknr_to_addr(i) + ofs; + + if (unlikely(err)) + break; + ++i; + ofs = 0; + } + return err < 0 ? 
err : 0; +} + +const struct file_operations erofs_dir_fops = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate_shared = erofs_readdir, +}; + diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h new file mode 100644 index 000000000000..afa7d45ca958 --- /dev/null +++ b/fs/erofs/erofs_fs.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_H +#define __EROFS_FS_H + +/* Enhanced(Extended) ROM File System */ +#define EROFS_SUPER_OFFSET 1024 + +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. + */ +#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 +#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING + +struct erofs_super_block { +/* 0 */__le32 magic; /* in the little endian */ +/* 4 */__le32 checksum; /* crc32c(super_block) */ +/* 8 */__le32 features; /* (aka. feature_compat) */ +/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ +/* 13 */__u8 reserved; + +/* 14 */__le16 root_nid; +/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ + +/* 24 */__le64 build_time; /* inode v1 time derivation */ +/* 32 */__le32 build_time_nsec; +/* 36 */__le32 blocks; /* used for statfs */ +/* 40 */__le32 meta_blkaddr; +/* 44 */__le32 xattr_blkaddr; +/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ +/* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. feature_incompat) */ + +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ + +/* + * erofs inode data mapping: + * 0 - inode plain without inline data A: + * inode, [xattrs], ... | ... | no-holed data + * 1 - inode VLE compression B (legacy): + * inode, [xattrs], extents ... | ... + * 2 - inode plain with inline data C: + * inode, [xattrs], last_inline_data, ... | ... | no-holed data + * 3 - inode compression D: + * inode, [xattrs], map_header, extents ... | ... 
+ * 4~7 - reserved + */ +enum { + EROFS_INODE_FLAT_PLAIN, + EROFS_INODE_FLAT_COMPRESSION_LEGACY, + EROFS_INODE_FLAT_INLINE, + EROFS_INODE_FLAT_COMPRESSION, + EROFS_INODE_LAYOUT_MAX +}; + +static inline bool erofs_inode_is_data_compressed(unsigned int datamode) +{ + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return true; + return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; +} + +/* bit definitions of inode i_advise */ +#define EROFS_I_VERSION_BITS 1 +#define EROFS_I_DATA_MAPPING_BITS 3 + +#define EROFS_I_VERSION_BIT 0 +#define EROFS_I_DATA_MAPPING_BIT 1 + +struct erofs_inode_v1 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_nlink; +/* 8 */__le32 i_size; +/* 12 */__le32 i_reserved; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; +/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ +/* 24 */__le16 i_uid; +/* 26 */__le16 i_gid; +/* 28 */__le32 i_reserved2; +} __packed; + +/* 32 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V1 0 +/* 64 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V2 1 + +struct erofs_inode_v2 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_reserved; +/* 8 */__le64 i_size; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; + + /* only used for 32-bit stat compatibility */ +/* 20 */__le32 i_ino; + +/* 24 */__le32 i_uid; +/* 28 */__le32 i_gid; +/* 32 */__le64 i_ctime; +/* 40 */__le32 i_ctime_nsec; +/* 44 */__le32 i_nlink; +/* 48 */__u8 i_reserved2[16]; +} __packed; /* 64 bytes */ + +#define EROFS_MAX_SHARED_XATTRS (128) +/* h_shared_count between 129 ... 255 are special # */ +#define EROFS_SHARED_XATTR_EXTENT (255) + +/* + * inline xattrs (n == i_xattr_icount): + * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes + * 12 bytes / \ + * / \ + * /-----------------------\ + * | erofs_xattr_entries+ | + * +-----------------------+ + * inline xattrs must starts in erofs_xattr_ibody_header, + * for read-only fs, no need to introduce h_refcount + */ +struct erofs_xattr_ibody_header { + __le32 h_reserved; + __u8 h_shared_count; + __u8 h_reserved2[7]; + __le32 h_shared_xattrs[0]; /* shared xattr id array */ +} __packed; + +/* Name indexes */ +#define EROFS_XATTR_INDEX_USER 1 +#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 +#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 +#define EROFS_XATTR_INDEX_TRUSTED 4 +#define EROFS_XATTR_INDEX_LUSTRE 5 +#define EROFS_XATTR_INDEX_SECURITY 6 + +/* xattr entry (for both inline & shared xattrs) */ +struct erofs_xattr_entry { + __u8 e_name_len; /* length of name */ + __u8 e_name_index; /* attribute name index */ + __le16 e_value_size; /* size of attribute value */ + /* followed by e_name and e_value */ + char e_name[0]; /* attribute name */ +} __packed; + +#define ondisk_xattr_ibody_size(count) ({\ + u32 __count = le16_to_cpu(count); \ + ((__count) == 0) ? 
0 : \ + sizeof(struct erofs_xattr_ibody_header) + \ + sizeof(__u32) * ((__count) - 1); }) + +#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) +#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ + sizeof(struct erofs_xattr_entry) + \ + (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) + +/* available compression algorithm types */ +enum { + Z_EROFS_COMPRESSION_LZ4, + Z_EROFS_COMPRESSION_MAX +}; + +/* + * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) + * e.g. for 4k logical cluster size, 4B if compacted 2B is off; + * (4B) + 2B + (4B) if compacted 2B is on. + */ +#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 + +#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) + +struct z_erofs_map_header { + __le32 h_reserved1; + __le16 h_advise; + /* + * bit 0-3 : algorithm type of head 1 (logical cluster type 01); + * bit 4-7 : algorithm type of head 2 (logical cluster type 11). + */ + __u8 h_algorithmtype; + /* + * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; + * bit 3-4 : (physical - logical) cluster bits of head 1: + * For example, if logical clustersize = 4096, 1 for 8192. + * bit 5-7 : (physical - logical) cluster bits of head 2. + */ + __u8 h_clusterbits; +}; + +#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 + +/* + * Z_EROFS Variable-sized Logical Extent cluster type: + * 0 - literal (uncompressed) cluster + * 1 - compressed cluster (for the head logical cluster) + * 2 - compressed cluster (for the other logical clusters) + * + * In detail, + * 0 - literal (uncompressed) cluster, + * di_advise = 0 + * di_clusterofs = the literal data offset of the cluster + * di_blkaddr = the blkaddr of the literal cluster + * + * 1 - compressed cluster (for the head logical cluster) + * di_advise = 1 + * di_clusterofs = the decompressed data offset of the cluster + * di_blkaddr = the blkaddr of the compressed cluster + * + * 2 - compressed cluster (for the other logical clusters) + * di_advise = 2 + * di_clusterofs = + * the decompressed data offset in its own head cluster + * di_u.delta[0] = distance to its corresponding head cluster + * di_u.delta[1] = distance to its corresponding tail cluster + * (di_advise could be 0, 1 or 2) + */ +enum { + Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, + Z_EROFS_VLE_CLUSTER_TYPE_HEAD, + Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, + Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, + Z_EROFS_VLE_CLUSTER_TYPE_MAX +}; + +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 + +struct z_erofs_vle_decompressed_index { + __le16 di_advise; + /* where to decompress in the head cluster */ + __le16 di_clusterofs; + + union { + /* for the head cluster */ + __le32 blkaddr; + /* + * for the rest clusters + * eg. 
for a 4k page-sized cluster, the maximum is 4K*64K = 256M
+	 * [0] - pointing to the head cluster
+	 * [1] - pointing to the tail cluster
+	 */
+		__le16 delta[2];
+	} di_u __packed;		/* 8 bytes */
+} __packed;
+
+#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
+	(round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
+	 sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
+
+/* dirents are sorted in alphabetical order, thus we can do binary search */
+struct erofs_dirent {
+	__le64 nid;	/*  0, node number */
+	__le16 nameoff;	/*  8, start offset of file name */
+	__u8 file_type;	/* 10, file type */
+	__u8 reserved;	/* 11, reserved */
+} __packed;
+
+/*
+ * EROFS file types should match generic FT_* types; there seems to be
+ * no need to add BUILD_BUG_ONs since a potential mismatch would break
+ * other filesystems as well...
+ */
+
+#define EROFS_NAME_LEN 255
+
+/* check the EROFS on-disk layout strictly at compile time */
+static inline void erofs_check_ondisk_layout_definitions(void)
+{
+	BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+	BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32);
+	BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64);
+	BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
+	BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
+	BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
+	BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
+	BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+	BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+		     Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
+}
+
+#endif
+
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
new file mode 100644
index 000000000000..80f4fe919ee7
--- /dev/null
+++ b/fs/erofs/inode.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
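
erofs_check_ondisk_layout_definitions() above pins every on-disk structure to its documented size, so accidental padding breaks the build rather than the filesystem. A userspace tool parsing the same format could keep an equivalent guard with _Static_assert (a sketch; the struct here is a stand-in re-declaration, not shared with the kernel header):

	#include <stdint.h>

	/* stand-in for the on-disk dirent; field offsets as documented above */
	struct erofs_dirent_u {
		uint64_t nid;		/*  0, node number */
		uint16_t nameoff;	/*  8, start offset of file name */
		uint8_t  file_type;	/* 10, file type */
		uint8_t  reserved;	/* 11, reserved */
	} __attribute__((__packed__));

	_Static_assert(sizeof(struct erofs_dirent_u) == 12,
		       "on-disk dirent must stay 12 bytes");
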
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +/* no locking */ +static int read_inode(struct inode *inode, void *data) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode_v1 *v1 = data; + const unsigned int advise = le16_to_cpu(v1->i_advise); + erofs_blk_t nblks = 0; + + vi->datamode = __inode_data_mapping(advise); + + if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { + errln("unsupported data mapping %u of nid %llu", + vi->datamode, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { + struct erofs_inode_v2 *v2 = data; + + vi->inode_isize = sizeof(struct erofs_inode_v2); + vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v2->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v2->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le32_to_cpu(v2->i_uid)); + i_gid_write(inode, le32_to_cpu(v2->i_gid)); + set_nlink(inode, le32_to_cpu(v2->i_nlink)); + + /* ns timestamp */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + le64_to_cpu(v2->i_ctime); + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + le32_to_cpu(v2->i_ctime_nsec); + + inode->i_size = le64_to_cpu(v2->i_size); + + /* total blocks for compressed files */ + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v2->i_u.compressed_blocks); + } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + vi->inode_isize = sizeof(struct erofs_inode_v1); + vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v1->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v1->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le16_to_cpu(v1->i_uid)); + i_gid_write(inode, le16_to_cpu(v1->i_gid)); + set_nlink(inode, le16_to_cpu(v1->i_nlink)); + + /* use build time to derive all file time */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + sbi->build_time; + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + sbi->build_time_nsec; + + inode->i_size = le32_to_cpu(v1->i_size); + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v1->i_u.compressed_blocks); + } else { + errln("unsupported on-disk inode version %u of nid %llu", + __inode_version(advise), vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (!nblks) + /* measure inode.i_blocks as generic filesystems */ + inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; + else + inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; + return 0; + +bogusimode: + errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; +} + +/* + * try_lock can be required since locking order is: + * file data(fs_inode) + * meta(bd_inode) + * but the majority of the callers is "iget", + * in that case we are pretty sure no deadlock since + * no data operations exist. 
However I tend to + * try_lock since it takes no much overhead and + * will success immediately. + */ +static int fill_inline_data(struct inode *inode, void *data, + unsigned int m_pofs) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_sb_info *sbi = EROFS_I_SB(inode); + + /* should be inode inline C */ + if (!is_inode_flat_inline(inode)) + return 0; + + /* fast symlink (following ext4) */ + if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { + char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); + + if (unlikely(!lnk)) + return -ENOMEM; + + m_pofs += vi->inode_isize + vi->xattr_isize; + + /* inline symlink data shouldn't across page boundary as well */ + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { + kfree(lnk); + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* get in-page inline data */ + memcpy(lnk, data + m_pofs, inode->i_size); + lnk[inode->i_size] = '\0'; + + inode->i_link = lnk; + set_inode_fast_symlink(inode); + } + return 0; +} + +static int fill_inode(struct inode *inode, int isdir) +{ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + struct erofs_vnode *vi = EROFS_V(inode); + struct page *page; + void *data; + int err; + erofs_blk_t blkaddr; + unsigned int ofs; + erofs_off_t inode_loc; + + trace_erofs_fill_inode(inode, isdir); + inode_loc = iloc(sbi, vi->nid); + blkaddr = erofs_blknr(inode_loc); + ofs = erofs_blkoff(inode_loc); + + debugln("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, ofs, blkaddr); + + page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); + + if (IS_ERR(page)) { + errln("failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return PTR_ERR(page); + } + + DBG_BUGON(!PageUptodate(page)); + data = page_address(page); + + err = read_inode(inode, data + ofs); + if (!err) { + /* setup the new inode */ + if (S_ISREG(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + inode->i_fop = &generic_ro_fops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &erofs_dir_iops; + inode->i_fop = &erofs_dir_fops; + } else if (S_ISLNK(inode->i_mode)) { + /* by default, page_get_link is used for symlink */ + inode->i_op = &erofs_symlink_iops; + inode_nohighmem(inode); + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + goto out_unlock; + } else { + err = -EFSCORRUPTED; + goto out_unlock; + } + + if (is_inode_layout_compression(inode)) { + err = z_erofs_fill_inode(inode); + goto out_unlock; + } + + inode->i_mapping->a_ops = &erofs_raw_access_aops; + + /* fill last page if inline data is available */ + err = fill_inline_data(inode, data, ofs); + } + +out_unlock: + unlock_page(page); + put_page(page); + return err; +} + +/* + * erofs nid is 64bits, but i_ino is 'unsigned long', therefore + * we should do more for 32-bit platform to find the right inode. 
+ */
+#if BITS_PER_LONG == 32
+static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+{
+	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+	return EROFS_V(inode)->nid == nid;
+}
+
+static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+{
+	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+	inode->i_ino = erofs_inode_hash(nid);
+	return 0;
+}
+#endif
+
+static inline struct inode *erofs_iget_locked(struct super_block *sb,
+					      erofs_nid_t nid)
+{
+	const unsigned long hashval = erofs_inode_hash(nid);
+
+#if BITS_PER_LONG >= 64
+	/* it is safe to use iget_locked for >= 64-bit platform */
+	return iget_locked(sb, hashval);
+#else
+	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
+			    erofs_iget_set_actor, &nid);
+#endif
+}
+
+struct inode *erofs_iget(struct super_block *sb,
+			 erofs_nid_t nid,
+			 bool isdir)
+{
+	struct inode *inode = erofs_iget_locked(sb, nid);
+
+	if (unlikely(!inode))
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW) {
+		int err;
+		struct erofs_vnode *vi = EROFS_V(inode);
+
+		vi->nid = nid;
+
+		err = fill_inode(inode, isdir);
+		if (likely(!err))
+			unlock_new_inode(inode);
+		else {
+			iget_failed(inode);
+			inode = ERR_PTR(err);
+		}
+	}
+	return inode;
+}
+
+int erofs_getattr(const struct path *path, struct kstat *stat,
+		  u32 request_mask, unsigned int query_flags)
+{
+	struct inode *const inode = d_inode(path->dentry);
+
+	if (is_inode_layout_compression(inode))
+		stat->attributes |= STATX_ATTR_COMPRESSED;
+
+	stat->attributes |= STATX_ATTR_IMMUTABLE;
+	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_IMMUTABLE);
+
+	generic_fillattr(inode, stat);
+	return 0;
+}
+
+const struct inode_operations erofs_generic_iops = {
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_symlink_iops = {
+	.get_link = page_get_link,
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_fast_symlink_iops = {
+	.get_link = simple_get_link,
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
new file mode 100644
index 000000000000..620b73fcc416
--- /dev/null
+++ b/fs/erofs/internal.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_INTERNAL_H
+#define __EROFS_INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "erofs_fs.h"
+
+/* redefine pr_fmt "erofs: " */
+#undef pr_fmt
+#define pr_fmt(fmt) "erofs: " fmt
+
+#define errln(x, ...)	pr_err(x "\n", ##__VA_ARGS__)
+#define infoln(x, ...)	pr_info(x "\n", ##__VA_ARGS__)
+#ifdef CONFIG_EROFS_FS_DEBUG
+#define debugln(x, ...)	pr_debug(x "\n", ##__VA_ARGS__)
+#define DBG_BUGON	BUG_ON
+#else
+#define debugln(x, ...)
((void)0) +#define DBG_BUGON(x) ((void)(x)) +#endif /* !CONFIG_EROFS_FS_DEBUG */ + +enum { + FAULT_KMALLOC, + FAULT_READ_IO, + FAULT_MAX, +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +extern const char *erofs_fault_name[FAULT_MAX]; +#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) + +struct erofs_fault_info { + atomic_t inject_ops; + unsigned int inject_rate; + unsigned int inject_type; +}; +#endif /* CONFIG_EROFS_FAULT_INJECTION */ + +/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ +#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 + +typedef u64 erofs_nid_t; +typedef u64 erofs_off_t; +/* data type for filesystem-wide blocks number */ +typedef u32 erofs_blk_t; + +struct erofs_sb_info { +#ifdef CONFIG_EROFS_FS_ZIP + /* list for all registered superblocks, mainly for shrinker */ + struct list_head list; + struct mutex umount_mutex; + + /* the dedicated workstation for compression */ + struct radix_tree_root workstn_tree; + + /* threshold for decompression synchronously */ + unsigned int max_sync_decompress_pages; + + unsigned int shrinker_run_no; + + /* current strategy of how to use managed cache */ + unsigned char cache_strategy; + + /* pseudo inode to manage cached pages */ + struct inode *managed_cache; +#endif /* CONFIG_EROFS_FS_ZIP */ + u32 blocks; + u32 meta_blkaddr; +#ifdef CONFIG_EROFS_FS_XATTR + u32 xattr_blkaddr; +#endif + + /* inode slot unit size in bit shift */ + unsigned char islotbits; + + u32 build_time_nsec; + u64 build_time; + + /* what we really care is nid, rather than ino.. */ + erofs_nid_t root_nid; + /* used for statfs, f_files - f_favail */ + u64 inos; + + u8 uuid[16]; /* 128-bit uuid for volume */ + u8 volume_name[16]; /* volume name */ + u32 requirements; + + unsigned int mount_opt; + +#ifdef CONFIG_EROFS_FAULT_INJECTION + struct erofs_fault_info fault_info; /* For fault injection */ +#endif +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +#define erofs_show_injection_info(type) \ + infoln("inject %s in %s of %pS", erofs_fault_name[type], \ + __func__, __builtin_return_address(0)) + +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (!ffi->inject_rate) + return false; + + if (!IS_FAULT_SET(ffi, type)) + return false; + + atomic_inc(&ffi->inject_ops); + if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { + atomic_set(&ffi->inject_ops, 0); + return true; + } + return false; +} +#else +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + return false; +} + +static inline void erofs_show_injection_info(int type) +{ +} +#endif /* !CONFIG_EROFS_FAULT_INJECTION */ + +static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, + size_t size, gfp_t flags) +{ + if (time_to_inject(sbi, FAULT_KMALLOC)) { + erofs_show_injection_info(FAULT_KMALLOC); + return NULL; + } + return kmalloc(size, flags); +} + +#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) +#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) + +/* Mount flags set via mount options or defaults */ +#define EROFS_MOUNT_XATTR_USER 0x00000010 +#define EROFS_MOUNT_POSIX_ACL 0x00000020 +#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 + +#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) +#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) +#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) + +#ifdef CONFIG_EROFS_FS_ZIP +enum { + EROFS_ZIP_CACHE_DISABLED, + EROFS_ZIP_CACHE_READAHEAD, + 
EROFS_ZIP_CACHE_READAROUND
+};
+
+#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+	/* the workgroup index in the workstation */
+	pgoff_t index;
+
+	/* overall workgroup reference count */
+	atomic_t refcount;
+};
+
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	/*
+	 * other observers should notice all modifications
+	 * in the freezing period.
+	 */
+	smp_mb();
+	atomic_set(&grp->refcount, orig_val);
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	return atomic_cond_read_relaxed(&grp->refcount,
+					VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	int v = atomic_read(&grp->refcount);
+
+	/* a workgroup is never frozen on uniprocessor systems */
+	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+	return v;
+}
+#endif	/* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES		0
+#endif	/* !CONFIG_EROFS_FS_ZIP */
+
+/* we strictly follow PAGE_SIZE and no buffer head yet */
+#define LOG_BLOCK_SIZE		PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define EROFS_IO_MAX_RETRIES_NOFAIL	5
+
+#define ROOT_NID(sb)		((sb)->root_nid)
+
+#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT	0
+#define EROFS_V_Z_INITED_BIT	1
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
+#define EROFS_V_BL_Z_BIT	(BITS_PER_LONG - 2)
+
+struct erofs_vnode {
+	erofs_nid_t nid;
+
+	/* atomic flags (including bitlocks) */
+	unsigned long flags;
+
+	unsigned char datamode;
+	unsigned char inode_isize;
+	unsigned short xattr_isize;
+
+	unsigned int xattr_shared_count;
+	unsigned int *xattr_shared_xattrs;
+
+	union {
+		erofs_blk_t raw_blkaddr;
+#ifdef CONFIG_EROFS_FS_ZIP
+		struct {
+			unsigned short z_advise;
+			unsigned char  z_algorithmtype[2];
+			unsigned char  z_logical_clusterbits;
+			unsigned char  z_physical_clusterbits[2];
+		};
+#endif	/* CONFIG_EROFS_FS_ZIP */
+	};
+	/* the corresponding vfs
inode */ + struct inode vfs_inode; +}; + +#define EROFS_V(ptr) \ + container_of(ptr, struct erofs_vnode, vfs_inode) + +#define __inode_advise(x, bit, bits) \ + (((x) >> (bit)) & ((1 << (bits)) - 1)) + +#define __inode_version(advise) \ + __inode_advise(advise, EROFS_I_VERSION_BIT, \ + EROFS_I_VERSION_BITS) + +#define __inode_data_mapping(advise) \ + __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ + EROFS_I_DATA_MAPPING_BITS) + +static inline unsigned long inode_datablocks(struct inode *inode) +{ + /* since i_size cannot be changed */ + return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); +} + +static inline bool is_inode_layout_compression(struct inode *inode) +{ + return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); +} + +static inline bool is_inode_flat_inline(struct inode *inode) +{ + return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; +} + +extern const struct super_operations erofs_sops; + +extern const struct address_space_operations erofs_raw_access_aops; +#ifdef CONFIG_EROFS_FS_ZIP +extern const struct address_space_operations z_erofs_vle_normalaccess_aops; +#endif + +/* + * Logical to physical block mapping, used by erofs_map_blocks() + * + * Different with other file systems, it is used for 2 access modes: + * + * 1) RAW access mode: + * + * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, + * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). + * + * Note that m_lblk in the RAW access mode refers to the number of + * the compressed ondisk block rather than the uncompressed + * in-memory block for the compressed file. + * + * m_pofs equals to m_lofs except for the inline data page. + * + * 2) Normal access mode: + * + * If the inode is not compressed, it has no difference with + * the RAW access mode. However, if the inode is compressed, + * users should pass a valid (m_lblk, m_lofs) pair, and get + * the needed m_pblk, m_pofs, m_len to get the compressed data + * and the updated m_lblk, m_lofs which indicates the start + * of the corresponding uncompressed data in the file. + */ +enum { + BH_Zipped = BH_PrivateStart, + BH_FullMapped, +}; + +/* Has a disk mapping */ +#define EROFS_MAP_MAPPED (1 << BH_Mapped) +/* Located in metadata (could be copied from bd_inode) */ +#define EROFS_MAP_META (1 << BH_Meta) +/* The extent has been compressed */ +#define EROFS_MAP_ZIPPED (1 << BH_Zipped) +/* The length of extent is full */ +#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) + +struct erofs_map_blocks { + erofs_off_t m_pa, m_la; + u64 m_plen, m_llen; + + unsigned int m_flags; + + struct page *mpage; +}; + +/* Flags used by erofs_map_blocks() */ +#define EROFS_GET_BLOCKS_RAW 0x0001 + +/* zmap.c */ +#ifdef CONFIG_EROFS_FS_ZIP +int z_erofs_fill_inode(struct inode *inode); +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags); +#else +static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } +static inline int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +/* data.c */ +static inline struct bio *erofs_grab_bio(struct super_block *sb, + erofs_blk_t blkaddr, + unsigned int nr_pages, + void *bi_private, bio_end_io_t endio, + bool nofail) +{ + const gfp_t gfp = GFP_NOIO; + struct bio *bio; + + do { + if (nr_pages == 1) { + bio = bio_alloc(gfp | (nofail ? 
__GFP_NOFAIL : 0), 1); + if (unlikely(!bio)) { + DBG_BUGON(nofail); + return ERR_PTR(-ENOMEM); + } + break; + } + bio = bio_alloc(gfp, nr_pages); + nr_pages /= 2; + } while (unlikely(!bio)); + + bio->bi_end_io = endio; + bio_set_dev(bio, sb->s_bdev); + bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; + bio->bi_private = bi_private; + return bio; +} + +static inline void __submit_bio(struct bio *bio, unsigned int op, + unsigned int op_flags) +{ + bio_set_op_attrs(bio, op, op_flags); + submit_bio(bio); +} + +struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, + bool prio, bool nofail); + +static inline struct page *erofs_get_meta_page(struct super_block *sb, + erofs_blk_t blkaddr, bool prio) +{ + return __erofs_get_meta_page(sb, blkaddr, prio, false); +} + +int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); + +static inline struct page *erofs_get_inline_page(struct inode *inode, + erofs_blk_t blkaddr) +{ + return erofs_get_meta_page(inode->i_sb, blkaddr, + S_ISDIR(inode->i_mode)); +} + +/* inode.c */ +static inline unsigned long erofs_inode_hash(erofs_nid_t nid) +{ +#if BITS_PER_LONG == 32 + return (nid >> 32) ^ (nid & 0xffffffff); +#else + return nid; +#endif +} + +extern const struct inode_operations erofs_generic_iops; +extern const struct inode_operations erofs_symlink_iops; +extern const struct inode_operations erofs_fast_symlink_iops; + +static inline void set_inode_fast_symlink(struct inode *inode) +{ + inode->i_op = &erofs_fast_symlink_iops; +} + +static inline bool is_inode_fast_symlink(struct inode *inode) +{ + return inode->i_op == &erofs_fast_symlink_iops; +} + +struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); +int erofs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); + +/* namei.c */ +extern const struct inode_operations erofs_dir_iops; + +int erofs_namei(struct inode *dir, struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type); + +/* dir.c */ +extern const struct file_operations erofs_dir_fops; + +/* utils.c / zdata.c */ +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); + +#if (EROFS_PCPUBUF_NR_PAGES > 0) +void *erofs_get_pcpubuf(unsigned int pagenr); +#define erofs_put_pcpubuf(buf) do { \ + (void)&(buf); \ + preempt_enable(); \ +} while (0) +#else +static inline void *erofs_get_pcpubuf(unsigned int pagenr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +#define erofs_put_pcpubuf(buf) do {} while (0) +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +int erofs_workgroup_put(struct erofs_workgroup *grp); +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag); +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, bool tag); +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); +void erofs_shrinker_register(struct super_block *sb); +void erofs_shrinker_unregister(struct super_block *sb); +int __init erofs_init_shrinker(void); +void erofs_exit_shrinker(void); +int __init z_erofs_init_zip_subsystem(void); +void z_erofs_exit_zip_subsystem(void); +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page); +#else +static inline void erofs_shrinker_register(struct super_block *sb) {} +static inline void erofs_shrinker_unregister(struct super_block *sb) {} +static inline int erofs_init_shrinker(void) { return 0; } +static 
inline void erofs_exit_shrinker(void) {} +static inline int z_erofs_init_zip_subsystem(void) { return 0; } +static inline void z_erofs_exit_zip_subsystem(void) {} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + +#endif /* __EROFS_INTERNAL_H */ + diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c new file mode 100644 index 000000000000..8832b5d95d91 --- /dev/null +++ b/fs/erofs/namei.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +struct erofs_qstr { + const unsigned char *name; + const unsigned char *end; +}; + +/* based on the end of qn is accurate and it must have the trailing '\0' */ +static inline int dirnamecmp(const struct erofs_qstr *qn, + const struct erofs_qstr *qd, + unsigned int *matched) +{ + unsigned int i = *matched; + + /* + * on-disk error, let's only BUG_ON in the debugging mode. + * otherwise, it will return 1 to just skip the invalid name + * and go on (in consideration of the lookup performance). + */ + DBG_BUGON(qd->name > qd->end); + + /* qd could not have trailing '\0' */ + /* However it is absolutely safe if < qd->end */ + while (qd->name + i < qd->end && qd->name[i] != '\0') { + if (qn->name[i] != qd->name[i]) { + *matched = i; + return qn->name[i] > qd->name[i] ? 1 : -1; + } + ++i; + } + *matched = i; + /* See comments in __d_alloc on the terminating NUL character */ + return qn->name[i] == '\0' ? 0 : 1; +} + +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) + +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, + u8 *data, + unsigned int dirblksize, + const int ndirents) +{ + int head, back; + unsigned int startprfx, endprfx; + struct erofs_dirent *const de = (struct erofs_dirent *)data; + + /* since the 1st dirent has been evaluated previously */ + head = 1; + back = ndirents - 1; + startprfx = endprfx = 0; + + while (head <= back) { + const int mid = head + (back - head) / 2; + const int nameoff = nameoff_from_disk(de[mid].nameoff, + dirblksize); + unsigned int matched = min(startprfx, endprfx); + struct erofs_qstr dname = { + .name = data + nameoff, + .end = unlikely(mid >= ndirents - 1) ? 
+ data + dirblksize : + data + nameoff_from_disk(de[mid + 1].nameoff, + dirblksize) + }; + + /* string comparison without already matched prefix */ + int ret = dirnamecmp(name, &dname, &matched); + + if (unlikely(!ret)) { + return de + mid; + } else if (ret > 0) { + head = mid + 1; + startprfx = matched; + } else { + back = mid - 1; + endprfx = matched; + } + } + + return ERR_PTR(-ENOENT); +} + +static struct page *find_target_block_classic(struct inode *dir, + struct erofs_qstr *name, + int *_ndirents) +{ + unsigned int startprfx, endprfx; + int head, back; + struct address_space *const mapping = dir->i_mapping; + struct page *candidate = ERR_PTR(-ENOENT); + + startprfx = endprfx = 0; + head = 0; + back = inode_datablocks(dir) - 1; + + while (head <= back) { + const int mid = head + (back - head) / 2; + struct page *page = read_mapping_page(mapping, mid, NULL); + + if (!IS_ERR(page)) { + struct erofs_dirent *de = kmap_atomic(page); + const int nameoff = nameoff_from_disk(de->nameoff, + EROFS_BLKSIZ); + const int ndirents = nameoff / sizeof(*de); + int diff; + unsigned int matched; + struct erofs_qstr dname; + + if (unlikely(!ndirents)) { + kunmap_atomic(de); + put_page(page); + errln("corrupted dir block %d @ nid %llu", + mid, EROFS_V(dir)->nid); + DBG_BUGON(1); + page = ERR_PTR(-EFSCORRUPTED); + goto out; + } + + matched = min(startprfx, endprfx); + + dname.name = (u8 *)de + nameoff; + if (ndirents == 1) + dname.end = (u8 *)de + EROFS_BLKSIZ; + else + dname.end = (u8 *)de + + nameoff_from_disk(de[1].nameoff, + EROFS_BLKSIZ); + + /* string comparison without already matched prefix */ + diff = dirnamecmp(name, &dname, &matched); + kunmap_atomic(de); + + if (unlikely(!diff)) { + *_ndirents = 0; + goto out; + } else if (diff > 0) { + head = mid + 1; + startprfx = matched; + + if (!IS_ERR(candidate)) + put_page(candidate); + candidate = page; + *_ndirents = ndirents; + } else { + put_page(page); + + back = mid - 1; + endprfx = matched; + } + continue; + } +out: /* free if the candidate is valid */ + if (!IS_ERR(candidate)) + put_page(candidate); + return page; + } + return candidate; +} + +int erofs_namei(struct inode *dir, + struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type) +{ + int ndirents; + struct page *page; + void *data; + struct erofs_dirent *de; + struct erofs_qstr qn; + + if (unlikely(!dir->i_size)) + return -ENOENT; + + qn.name = name->name; + qn.end = name->name + name->len; + + ndirents = 0; + page = find_target_block_classic(dir, &qn, &ndirents); + + if (IS_ERR(page)) + return PTR_ERR(page); + + data = kmap_atomic(page); + /* the target page has been mapped */ + if (ndirents) + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); + else + de = (struct erofs_dirent *)data; + + if (!IS_ERR(de)) { + *nid = le64_to_cpu(de->nid); + *d_type = de->file_type; + } + + kunmap_atomic(data); + put_page(page); + + return PTR_ERR_OR_ZERO(de); +} + +/* NOTE: i_mutex is already held by vfs */ +static struct dentry *erofs_lookup(struct inode *dir, + struct dentry *dentry, + unsigned int flags) +{ + int err; + erofs_nid_t nid; + unsigned int d_type; + struct inode *inode; + + DBG_BUGON(!d_really_is_negative(dentry)); + /* dentry must be unhashed in lookup, no need to worry about */ + DBG_BUGON(!d_unhashed(dentry)); + + trace_erofs_lookup(dir, dentry, flags); + + /* file name exceeds fs limit */ + if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) + return ERR_PTR(-ENAMETOOLONG); + + /* false uninitialized warnings on gcc 4.8.x */ + err = erofs_namei(dir, &dentry->d_name, &nid, 
&d_type);
+
+	if (err == -ENOENT) {
+		/* negative dentry */
+		inode = NULL;
+	} else if (unlikely(err)) {
+		inode = ERR_PTR(err);
+	} else {
+		debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+			dentry->d_name.name, nid, d_type);
+		inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
+	}
+	return d_splice_alias(inode, dentry);
+}
+
+const struct inode_operations erofs_dir_iops = {
+	.lookup = erofs_lookup,
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
new file mode 100644
index 000000000000..6d3a9bcb8daa
--- /dev/null
+++ b/fs/erofs/super.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "xattr.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/erofs.h>
+
+static struct kmem_cache *erofs_inode_cachep __read_mostly;
+
+static void init_once(void *ptr)
+{
+	struct erofs_vnode *vi = ptr;
+
+	inode_init_once(&vi->vfs_inode);
+}
+
+static int __init erofs_init_inode_cache(void)
+{
+	erofs_inode_cachep = kmem_cache_create("erofs_inode",
+					       sizeof(struct erofs_vnode), 0,
+					       SLAB_RECLAIM_ACCOUNT,
+					       init_once);
+
+	return erofs_inode_cachep ? 0 : -ENOMEM;
+}
+
+static void erofs_exit_inode_cache(void)
+{
+	kmem_cache_destroy(erofs_inode_cachep);
+}
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct erofs_vnode *vi =
+		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
+
+	if (!vi)
+		return NULL;
+
+	/* zero out everything except vfs_inode */
+	memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
+	return &vi->vfs_inode;
+}
+
+static void free_inode(struct inode *inode)
+{
+	struct erofs_vnode *vi = EROFS_V(inode);
+
+	/*
+	 * be careful of the RCU symlink path (see ext4_inode_info->i_data)!
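+	 * For fast symlinks, ->i_link points at a small buffer allocated
+	 * when the inode was set up, rather than into the page cache, so
+	 * it has to be freed explicitly here.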
*/ + if (is_inode_fast_symlink(inode)) + kfree(inode->i_link); + + kfree(vi->xattr_shared_xattrs); + + kmem_cache_free(erofs_inode_cachep, vi); +} + +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + +static int superblock_read(struct super_block *sb) +{ + struct erofs_sb_info *sbi; + struct buffer_head *bh; + struct erofs_super_block *layout; + unsigned int blkszbits; + int ret; + + bh = sb_bread(sb, 0); + + if (!bh) { + errln("cannot read erofs superblock"); + return -EIO; + } + + sbi = EROFS_SB(sb); + layout = (struct erofs_super_block *)((u8 *)bh->b_data + + EROFS_SUPER_OFFSET); + + ret = -EINVAL; + if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { + errln("cannot find valid erofs superblock"); + goto out; + } + + blkszbits = layout->blkszbits; + /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ + if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { + errln("blksize %u isn't supported on this platform", + 1 << blkszbits); + goto out; + } + + if (!check_layout_compatibility(sb, layout)) + goto out; + + sbi->blocks = le32_to_cpu(layout->blocks); + sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); +#ifdef CONFIG_EROFS_FS_XATTR + sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); +#endif + sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; + sbi->root_nid = le16_to_cpu(layout->root_nid); + sbi->inos = le64_to_cpu(layout->inos); + + sbi->build_time = le64_to_cpu(layout->build_time); + sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); + + memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); + + ret = strscpy(sbi->volume_name, layout->volume_name, + sizeof(layout->volume_name)); + if (ret < 0) { /* -E2BIG */ + errln("bad volume name without NIL terminator"); + ret = -EFSCORRUPTED; + goto out; + } + ret = 0; +out: + brelse(bh); + return ret; +} + +#ifdef CONFIG_EROFS_FAULT_INJECTION +const char *erofs_fault_name[FAULT_MAX] = { + [FAULT_KMALLOC] = "kmalloc", + [FAULT_READ_IO] = "read IO error", +}; + +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (rate) { + atomic_set(&ffi->inject_ops, 0); + ffi->inject_rate = rate; + ffi->inject_type = (1 << FAULT_MAX) - 1; + } else { + memset(ffi, 0, sizeof(struct erofs_fault_info)); + } + + set_opt(sbi, FAULT_INJECTION); +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + int rate = 0; + + if (args->from && match_int(args, &rate)) + return -EINVAL; + + __erofs_build_fault_attr(sbi, rate); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return sbi->fault_info.inject_rate; +} +#else +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("fault_injection options not supported"); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return 0; +} +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +static int erofs_build_cache_strategy(struct 
erofs_sb_info *sbi, + substring_t *args) +{ + const char *cs = match_strdup(args); + int err = 0; + + if (!cs) { + errln("Not enough memory to store cache strategy"); + return -ENOMEM; + } + + if (!strcmp(cs, "disabled")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; + } else if (!strcmp(cs, "readahead")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; + } else if (!strcmp(cs, "readaround")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + } else { + errln("Unrecognized cache strategy \"%s\"", cs); + err = -EINVAL; + } + kfree(cs); + return err; +} +#else +static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("EROFS compression is disabled, so cache strategy is ignored"); + return 0; +} +#endif + +/* set up default EROFS parameters */ +static void default_options(struct erofs_sb_info *sbi) +{ +#ifdef CONFIG_EROFS_FS_ZIP + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + sbi->max_sync_decompress_pages = 3; +#endif +#ifdef CONFIG_EROFS_FS_XATTR + set_opt(sbi, XATTR_USER); +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + set_opt(sbi, POSIX_ACL); +#endif +} + +enum { + Opt_user_xattr, + Opt_nouser_xattr, + Opt_acl, + Opt_noacl, + Opt_fault_injection, + Opt_cache_strategy, + Opt_err +}; + +static match_table_t erofs_tokens = { + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_acl, "acl"}, + {Opt_noacl, "noacl"}, + {Opt_fault_injection, "fault_injection=%u"}, + {Opt_cache_strategy, "cache_strategy=%s"}, + {Opt_err, NULL} +}; + +static int parse_options(struct super_block *sb, char *options) +{ + substring_t args[MAX_OPT_ARGS]; + char *p; + int err; + + if (!options) + return 0; + + while ((p = strsep(&options, ","))) { + int token; + + if (!*p) + continue; + + args[0].to = args[0].from = NULL; + token = match_token(p, erofs_tokens, args); + + switch (token) { +#ifdef CONFIG_EROFS_FS_XATTR + case Opt_user_xattr: + set_opt(EROFS_SB(sb), XATTR_USER); + break; + case Opt_nouser_xattr: + clear_opt(EROFS_SB(sb), XATTR_USER); + break; +#else + case Opt_user_xattr: + infoln("user_xattr options not supported"); + break; + case Opt_nouser_xattr: + infoln("nouser_xattr options not supported"); + break; +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + case Opt_acl: + set_opt(EROFS_SB(sb), POSIX_ACL); + break; + case Opt_noacl: + clear_opt(EROFS_SB(sb), POSIX_ACL); + break; +#else + case Opt_acl: + infoln("acl options not supported"); + break; + case Opt_noacl: + infoln("noacl options not supported"); + break; +#endif + case Opt_fault_injection: + err = erofs_build_fault_attr(EROFS_SB(sb), args); + if (err) + return err; + break; + case Opt_cache_strategy: + err = erofs_build_cache_strategy(EROFS_SB(sb), args); + if (err) + return err; + break; + default: + errln("Unrecognized mount option \"%s\" or missing value", p); + return -EINVAL; + } + } + return 0; +} + +#ifdef CONFIG_EROFS_FS_ZIP +static const struct address_space_operations managed_cache_aops; + +static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) +{ + int ret = 1; /* 0 - busy */ + struct address_space *const mapping = page->mapping; + + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(mapping->a_ops != &managed_cache_aops); + + if (PagePrivate(page)) + ret = erofs_try_to_free_cached_page(mapping, page); + + return ret; +} + +static void managed_cache_invalidatepage(struct page *page, + unsigned int offset, + unsigned int length) +{ + const unsigned int stop = length + offset; + + DBG_BUGON(!PageLocked(page)); + + /* Check for potential overflow in 
debug mode */ + DBG_BUGON(stop > PAGE_SIZE || stop < length); + + if (offset == 0 && stop == PAGE_SIZE) + while (!managed_cache_releasepage(page, GFP_NOFS)) + cond_resched(); +} + +static const struct address_space_operations managed_cache_aops = { + .releasepage = managed_cache_releasepage, + .invalidatepage = managed_cache_invalidatepage, +}; + +static int erofs_init_managed_cache(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + struct inode *const inode = new_inode(sb); + + if (unlikely(!inode)) + return -ENOMEM; + + set_nlink(inode, 1); + inode->i_size = OFFSET_MAX; + + inode->i_mapping->a_ops = &managed_cache_aops; + mapping_set_gfp_mask(inode->i_mapping, + GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); + sbi->managed_cache = inode; + return 0; +} +#else +static int erofs_init_managed_cache(struct super_block *sb) { return 0; } +#endif + +static int erofs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct inode *inode; + struct erofs_sb_info *sbi; + int err; + + infoln("fill_super, device -> %s", sb->s_id); + infoln("options -> %s", (char *)data); + + sb->s_magic = EROFS_SUPER_MAGIC; + + if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { + errln("failed to set erofs blksize"); + return -EINVAL; + } + + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (unlikely(!sbi)) + return -ENOMEM; + + sb->s_fs_info = sbi; + err = superblock_read(sb); + if (err) + return err; + + sb->s_flags |= SB_RDONLY | SB_NOATIME; + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_time_gran = 1; + + sb->s_op = &erofs_sops; + +#ifdef CONFIG_EROFS_FS_XATTR + sb->s_xattr = erofs_xattr_handlers; +#endif + /* set erofs default mount options */ + default_options(sbi); + + err = parse_options(sb, data); + if (unlikely(err)) + return err; + + if (!silent) + infoln("root inode @ nid %llu", ROOT_NID(sbi)); + + if (test_opt(sbi, POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + +#ifdef CONFIG_EROFS_FS_ZIP + INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); +#endif + + /* get the root inode */ + inode = erofs_iget(sb, ROOT_NID(sbi), true); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + if (unlikely(!S_ISDIR(inode->i_mode))) { + errln("rootino(nid %llu) is not a directory(i_mode %o)", + ROOT_NID(sbi), inode->i_mode); + iput(inode); + return -EINVAL; + } + + sb->s_root = d_make_root(inode); + if (unlikely(!sb->s_root)) + return -ENOMEM; + + erofs_shrinker_register(sb); + /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ + err = erofs_init_managed_cache(sb); + if (unlikely(err)) + return err; + + if (!silent) + infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); + return 0; +} + +static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); +} + +/* + * could be triggered after deactivate_locked_super() + * is called, thus including umount and failed to initialize. 
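+ * (i.e. both the regular umount path and the error path where
+ * fill_super fails end up here)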
+ */ +static void erofs_kill_sb(struct super_block *sb) +{ + struct erofs_sb_info *sbi; + + WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); + infoln("unmounting for %s", sb->s_id); + + kill_block_super(sb); + + sbi = EROFS_SB(sb); + if (!sbi) + return; + kfree(sbi); + sb->s_fs_info = NULL; +} + +/* called when ->s_root is non-NULL */ +static void erofs_put_super(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + + DBG_BUGON(!sbi); + + erofs_shrinker_unregister(sb); +#ifdef CONFIG_EROFS_FS_ZIP + iput(sbi->managed_cache); + sbi->managed_cache = NULL; +#endif +} + +static struct file_system_type erofs_fs_type = { + .owner = THIS_MODULE, + .name = "erofs", + .mount = erofs_mount, + .kill_sb = erofs_kill_sb, + .fs_flags = FS_REQUIRES_DEV, +}; +MODULE_ALIAS_FS("erofs"); + +static int __init erofs_module_init(void) +{ + int err; + + erofs_check_ondisk_layout_definitions(); + infoln("initializing erofs " EROFS_VERSION); + + err = erofs_init_inode_cache(); + if (err) + goto icache_err; + + err = erofs_init_shrinker(); + if (err) + goto shrinker_err; + + err = z_erofs_init_zip_subsystem(); + if (err) + goto zip_err; + + err = register_filesystem(&erofs_fs_type); + if (err) + goto fs_err; + + infoln("successfully to initialize erofs"); + return 0; + +fs_err: + z_erofs_exit_zip_subsystem(); +zip_err: + erofs_exit_shrinker(); +shrinker_err: + erofs_exit_inode_cache(); +icache_err: + return err; +} + +static void __exit erofs_module_exit(void) +{ + unregister_filesystem(&erofs_fs_type); + z_erofs_exit_zip_subsystem(); + erofs_exit_shrinker(); + erofs_exit_inode_cache(); + infoln("successfully finalize erofs"); +} + +/* get filesystem statistics */ +static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + + buf->f_type = sb->s_magic; + buf->f_bsize = EROFS_BLKSIZ; + buf->f_blocks = sbi->blocks; + buf->f_bfree = buf->f_bavail = 0; + + buf->f_files = ULLONG_MAX; + buf->f_ffree = ULLONG_MAX - sbi->inos; + + buf->f_namelen = EROFS_NAME_LEN; + + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + return 0; +} + +static int erofs_show_options(struct seq_file *seq, struct dentry *root) +{ + struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); + +#ifdef CONFIG_EROFS_FS_XATTR + if (test_opt(sbi, XATTR_USER)) + seq_puts(seq, ",user_xattr"); + else + seq_puts(seq, ",nouser_xattr"); +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + if (test_opt(sbi, POSIX_ACL)) + seq_puts(seq, ",acl"); + else + seq_puts(seq, ",noacl"); +#endif + if (test_opt(sbi, FAULT_INJECTION)) + seq_printf(seq, ",fault_injection=%u", + erofs_get_fault_rate(sbi)); +#ifdef CONFIG_EROFS_FS_ZIP + if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { + seq_puts(seq, ",cache_strategy=disabled"); + } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { + seq_puts(seq, ",cache_strategy=readahead"); + } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { + seq_puts(seq, ",cache_strategy=readaround"); + } else { + seq_puts(seq, ",cache_strategy=(unknown)"); + DBG_BUGON(1); + } +#endif + return 0; +} + +static int erofs_remount(struct super_block *sb, int *flags, char *data) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + unsigned int org_mnt_opt = sbi->mount_opt; + unsigned int org_inject_rate = erofs_get_fault_rate(sbi); + int err; + + DBG_BUGON(!sb_rdonly(sb)); + err = parse_options(sb, data); + if (err) + goto out; + + if (test_opt(sbi, 
POSIX_ACL))
+		sb->s_flags |= SB_POSIXACL;
+	else
+		sb->s_flags &= ~SB_POSIXACL;
+
+	*flags |= SB_RDONLY;
+	return 0;
+out:
+	__erofs_build_fault_attr(sbi, org_inject_rate);
+	sbi->mount_opt = org_mnt_opt;
+
+	return err;
+}
+
+const struct super_operations erofs_sops = {
+	.put_super = erofs_put_super,
+	.alloc_inode = alloc_inode,
+	.free_inode = free_inode,
+	.statfs = erofs_statfs,
+	.show_options = erofs_show_options,
+	.remount_fs = erofs_remount,
+};
+
+module_init(erofs_module_init);
+module_exit(erofs_module_exit);
+
+MODULE_DESCRIPTION("Enhanced ROM File System");
+MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
+MODULE_LICENSE("GPL");
+
diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
new file mode 100644
index 000000000000..a72897c86744
--- /dev/null
+++ b/fs/erofs/tagptr.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A tagged pointer implementation
+ *
+ * Copyright (C) 2018 Gao Xiang
+ */
+#ifndef __EROFS_FS_TAGPTR_H
+#define __EROFS_FS_TAGPTR_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/*
+ * the names of the tagged pointer types are tagptr{1, 2, 3...}_t;
+ * avoid directly using the internal structs __tagptr{1, 2, 3...}
+ */
+#define __MAKE_TAGPTR(n) \
+typedef struct __tagptr##n {	\
+	uintptr_t v;	\
+} tagptr##n##_t;
+
+__MAKE_TAGPTR(1)
+__MAKE_TAGPTR(2)
+__MAKE_TAGPTR(3)
+__MAKE_TAGPTR(4)
+
+#undef __MAKE_TAGPTR
+
+extern void __compiletime_error("bad tagptr tags")
+	__bad_tagptr_tags(void);
+
+extern void __compiletime_error("bad tagptr type")
+	__bad_tagptr_type(void);
+
+/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
+#define __tagptr_mask_1(ptr, n)	\
+	__builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
+		(1UL << (n)) - 1 :
+
+#define __tagptr_mask(ptr)	(\
+	__tagptr_mask_1(ptr, 1) ( \
+	__tagptr_mask_1(ptr, 2) ( \
+	__tagptr_mask_1(ptr, 3) ( \
+	__tagptr_mask_1(ptr, 4) ( \
+	__bad_tagptr_type(), 0)))))
+
+/* generate a tagged pointer from a raw value */
+#define tagptr_init(type, val) \
+	((typeof(type)){ .v = (uintptr_t)(val) })
+
+/*
+ * directly cast a tagged pointer to the native pointer type, which
+ * can be used for backward compatibility with existing code.
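+ *
+ * For example, a minimal round trip (an illustrative sketch: "page"
+ * stands for any pointer whose low bit is clear, e.g. a struct page *):
+ *
+ *	tagptr1_t t = tagptr_fold(tagptr1_t, page, 1);
+ *	void *raw = tagptr_cast_ptr(t);            (tag bit still encoded)
+ *	struct page *p = tagptr_unfold_ptr(t);     (tag bit masked off)
+ *	unsigned long tag = tagptr_unfold_tags(t); (== 1)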
+ */ +#define tagptr_cast_ptr(tptr) ((void *)(tptr).v) + +/* encode tagged pointers */ +#define tagptr_fold(type, ptr, _tags) ({ \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \ + __bad_tagptr_tags(); \ +tagptr_init(type, (uintptr_t)(ptr) | tags); }) + +/* decode tagged pointers */ +#define tagptr_unfold_ptr(tptr) \ + ((void *)((tptr).v & ~__tagptr_mask(tptr))) + +#define tagptr_unfold_tags(tptr) \ + ((tptr).v & __tagptr_mask(tptr)) + +/* operations for the tagger pointer */ +#define tagptr_eq(_tptr1, _tptr2) ({ \ + typeof(_tptr1) tptr1 = (_tptr1); \ + typeof(_tptr2) tptr2 = (_tptr2); \ + (void)(&tptr1 == &tptr2); \ +(tptr1).v == (tptr2).v; }) + +/* lock-free CAS operation */ +#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + typeof(_o) o = (_o); \ + typeof(_n) n = (_n); \ + (void)(&o == &n); \ + (void)(&o == ptptr); \ +tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) + +/* wrap WRITE_ONCE if atomic update is needed */ +#define tagptr_replace_tags(_ptptr, tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \ +*ptptr; }) + +#define tagptr_set_tags(_ptptr, _tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ + __bad_tagptr_tags(); \ + ptptr->v |= tags; \ +*ptptr; }) + +#define tagptr_clear_tags(_ptptr, _tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ + __bad_tagptr_tags(); \ + ptptr->v &= ~tags; \ +*ptptr; }) + +#endif /* __EROFS_FS_TAGPTR_H */ + diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c new file mode 100644 index 000000000000..1dd041aa0f5a --- /dev/null +++ b/fs/erofs/utils.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include + +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) +{ + struct page *page; + + if (!list_empty(pool)) { + page = lru_to_page(pool); + DBG_BUGON(page_ref_count(page) != 1); + list_del(&page->lru); + } else { + page = alloc_pages(gfp | (nofail ? 
__GFP_NOFAIL : 0), 0); + } + return page; +} + +#if (EROFS_PCPUBUF_NR_PAGES > 0) +static struct { + u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; +} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; + +void *erofs_get_pcpubuf(unsigned int pagenr) +{ + preempt_disable(); + return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; +} +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +/* global shrink count (for all mounted EROFS instances) */ +static atomic_long_t erofs_global_shrink_cnt; + +#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) +#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) + +static int erofs_workgroup_get(struct erofs_workgroup *grp) +{ + int o; + +repeat: + o = erofs_wait_on_workgroup_freezed(grp); + if (unlikely(o <= 0)) + return -1; + + if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) + goto repeat; + + /* decrease refcount paired by erofs_workgroup_put */ + if (unlikely(o == 1)) + atomic_long_dec(&erofs_global_shrink_cnt); + return 0; +} + +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + struct erofs_workgroup *grp; + +repeat: + rcu_read_lock(); + grp = radix_tree_lookup(&sbi->workstn_tree, index); + if (grp) { + *tag = xa_pointer_tag(grp); + grp = xa_untag_pointer(grp); + + if (erofs_workgroup_get(grp)) { + /* prefer to relax rcu read side */ + rcu_read_unlock(); + goto repeat; + } + + DBG_BUGON(index != grp->index); + } + rcu_read_unlock(); + return grp; +} + +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, + bool tag) +{ + struct erofs_sb_info *sbi; + int err; + + /* grp shouldn't be broken or used before */ + if (unlikely(atomic_read(&grp->refcount) != 1)) { + DBG_BUGON(1); + return -EINVAL; + } + + err = radix_tree_preload(GFP_NOFS); + if (err) + return err; + + sbi = EROFS_SB(sb); + xa_lock(&sbi->workstn_tree); + + grp = xa_tag_pointer(grp, tag); + + /* + * Bump up reference count before making this workgroup + * visible to other users in order to avoid potential UAF + * without serialized by workstn_lock. + */ + __erofs_workgroup_get(grp); + + err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); + if (unlikely(err)) + /* + * it's safe to decrease since the workgroup isn't visible + * and refcount >= 2 (cannot be freezed). + */ + __erofs_workgroup_put(grp); + + xa_unlock(&sbi->workstn_tree); + radix_tree_preload_end(); + return err; +} + +static void __erofs_workgroup_free(struct erofs_workgroup *grp) +{ + atomic_long_dec(&erofs_global_shrink_cnt); + erofs_workgroup_free_rcu(grp); +} + +int erofs_workgroup_put(struct erofs_workgroup *grp) +{ + int count = atomic_dec_return(&grp->refcount); + + if (count == 1) + atomic_long_inc(&erofs_global_shrink_cnt); + else if (!count) + __erofs_workgroup_free(grp); + return count; +} + +static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) +{ + erofs_workgroup_unfreeze(grp, 0); + __erofs_workgroup_free(grp); +} + +static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) +{ + /* + * If managed cache is on, refcount of workgroups + * themselves could be < 0 (freezed). In other words, + * there is no guarantee that all refcounts > 0. + */ + if (!erofs_workgroup_try_to_freeze(grp, 1)) + return false; + + /* + * Note that all cached pages should be unattached + * before deleted from the radix tree. 
Otherwise some + * cached pages could be still attached to the orphan + * old workgroup when the new one is available in the tree. + */ + if (erofs_try_to_free_all_cached_pages(sbi, grp)) { + erofs_workgroup_unfreeze(grp, 1); + return false; + } + + /* + * It's impossible to fail after the workgroup is freezed, + * however in order to avoid some race conditions, add a + * DBG_BUGON to observe this in advance. + */ + DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, + grp->index)) != grp); + + /* + * If managed cache is on, last refcount should indicate + * the related workstation. + */ + erofs_workgroup_unfreeze_final(grp); + return true; +} + +static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, + unsigned long nr_shrink, + bool cleanup) +{ + pgoff_t first_index = 0; + void *batch[PAGEVEC_SIZE]; + unsigned int freed = 0; + + int i, found; +repeat: + xa_lock(&sbi->workstn_tree); + + found = radix_tree_gang_lookup(&sbi->workstn_tree, + batch, first_index, PAGEVEC_SIZE); + + for (i = 0; i < found; ++i) { + struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); + + first_index = grp->index + 1; + + /* try to shrink each valid workgroup */ + if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) + continue; + + ++freed; + if (unlikely(!--nr_shrink)) + break; + } + xa_unlock(&sbi->workstn_tree); + + if (i && nr_shrink) + goto repeat; + return freed; +} + +/* protected by 'erofs_sb_list_lock' */ +static unsigned int shrinker_run_no; + +/* protects the mounted 'erofs_sb_list' */ +static DEFINE_SPINLOCK(erofs_sb_list_lock); +static LIST_HEAD(erofs_sb_list); + +void erofs_shrinker_register(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + + mutex_init(&sbi->umount_mutex); + + spin_lock(&erofs_sb_list_lock); + list_add(&sbi->list, &erofs_sb_list); + spin_unlock(&erofs_sb_list_lock); +} + +void erofs_shrinker_unregister(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + + mutex_lock(&sbi->umount_mutex); + erofs_shrink_workstation(sbi, ~0UL, true); + + spin_lock(&erofs_sb_list_lock); + list_del(&sbi->list); + spin_unlock(&erofs_sb_list_lock); + mutex_unlock(&sbi->umount_mutex); +} + +static unsigned long erofs_shrink_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + return atomic_long_read(&erofs_global_shrink_cnt); +} + +static unsigned long erofs_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct erofs_sb_info *sbi; + struct list_head *p; + + unsigned long nr = sc->nr_to_scan; + unsigned int run_no; + unsigned long freed = 0; + + spin_lock(&erofs_sb_list_lock); + do { + run_no = ++shrinker_run_no; + } while (run_no == 0); + + /* Iterate over all mounted superblocks and try to shrink them */ + p = erofs_sb_list.next; + while (p != &erofs_sb_list) { + sbi = list_entry(p, struct erofs_sb_info, list); + + /* + * We move the ones we do to the end of the list, so we stop + * when we see one we have already done. + */ + if (sbi->shrinker_run_no == run_no) + break; + + if (!mutex_trylock(&sbi->umount_mutex)) { + p = p->next; + continue; + } + + spin_unlock(&erofs_sb_list_lock); + sbi->shrinker_run_no = run_no; + + freed += erofs_shrink_workstation(sbi, nr, false); + + spin_lock(&erofs_sb_list_lock); + /* Get the next list element before we move this one */ + p = p->next; + + /* + * Move this one to the end of the list to provide some + * fairness. 
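+		 * (in effect a round-robin: repeated scans rotate through
+		 * the mounted superblocks instead of always hitting the
+		 * same one first)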
+		 */
+		list_move_tail(&sbi->list, &erofs_sb_list);
+		mutex_unlock(&sbi->umount_mutex);
+
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&erofs_sb_list_lock);
+	return freed;
+}
+
+static struct shrinker erofs_shrinker_info = {
+	.scan_objects = erofs_shrink_scan,
+	.count_objects = erofs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
+int __init erofs_init_shrinker(void)
+{
+	return register_shrinker(&erofs_shrinker_info);
+}
+
+void erofs_exit_shrinker(void)
+{
+	unregister_shrinker(&erofs_shrinker_info);
+}
+#endif	/* !CONFIG_EROFS_FS_ZIP */
+
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
new file mode 100644
index 000000000000..a8286998a079
--- /dev/null
+++ b/fs/erofs/xattr.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include <linux/security.h>
+#include "xattr.h"
+
+struct xattr_iter {
+	struct super_block *sb;
+	struct page *page;
+	void *kaddr;
+
+	erofs_blk_t blkaddr;
+	unsigned int ofs;
+};
+
+static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
+{
+	/* the only user of kunmap() is 'init_inode_xattrs' */
+	if (unlikely(!atomic))
+		kunmap(it->page);
+	else
+		kunmap_atomic(it->kaddr);
+
+	unlock_page(it->page);
+	put_page(it->page);
+}
+
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+	if (!it->page)
+		return;
+
+	xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct xattr_iter it;
+	unsigned int i;
+	struct erofs_xattr_ibody_header *ih;
+	struct super_block *sb;
+	struct erofs_sb_info *sbi;
+	bool atomic_map;
+	int ret = 0;
+
+	/* in most cases, the xattrs of this inode are already initialized */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		return 0;
+
+	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+		return -ERESTARTSYS;
+
+	/* has someone else initialized xattrs for us? */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		goto out_unlock;
+
+	/*
+	 * bypass all xattr operations if ->xattr_isize is not greater than
+	 * sizeof(struct erofs_xattr_ibody_header), in detail:
+	 * 1) if it cannot even hold an erofs_xattr_ibody_header,
+	 *    ->xattr_isize must be 0 (which means no xattrs at all);
+	 * 2) if it holds exactly an erofs_xattr_ibody_header, the layout
+	 *    is on-disk undefined right now (maybe used later with some
+	 *    new sb feature).
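+	 *
+	 * To illustrate (assuming the 12-byte on-disk ibody header as
+	 * defined at the time of writing):
+	 *	->xattr_isize == 0:     no xattrs at all (-ENOATTR);
+	 *	->xattr_isize == 12:    header only, undefined (-EOPNOTSUPP);
+	 *	0 < ->xattr_isize < 12: corrupted image (-EFSCORRUPTED).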
+ */ + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { + errln("xattr_isize %d of nid %llu is not supported yet", + vi->xattr_isize, vi->nid); + ret = -EOPNOTSUPP; + goto out_unlock; + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { + if (unlikely(vi->xattr_isize)) { + errln("bogus xattr ibody @ nid %llu", vi->nid); + DBG_BUGON(1); + ret = -EFSCORRUPTED; + goto out_unlock; /* xattr ondisk layout error */ + } + ret = -ENOATTR; + goto out_unlock; + } + + sb = inode->i_sb; + sbi = EROFS_SB(sb); + it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); + it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); + + it.page = erofs_get_inline_page(inode, it.blkaddr); + if (IS_ERR(it.page)) { + ret = PTR_ERR(it.page); + goto out_unlock; + } + + /* read in shared xattr array (non-atomic, see kmalloc below) */ + it.kaddr = kmap(it.page); + atomic_map = false; + + ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); + + vi->xattr_shared_count = ih->h_shared_count; + vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, + sizeof(uint), GFP_KERNEL); + if (!vi->xattr_shared_xattrs) { + xattr_iter_end(&it, atomic_map); + ret = -ENOMEM; + goto out_unlock; + } + + /* let's skip ibody header */ + it.ofs += sizeof(struct erofs_xattr_ibody_header); + + for (i = 0; i < vi->xattr_shared_count; ++i) { + if (unlikely(it.ofs >= EROFS_BLKSIZ)) { + /* cannot be unaligned */ + DBG_BUGON(it.ofs != EROFS_BLKSIZ); + xattr_iter_end(&it, atomic_map); + + it.page = erofs_get_meta_page(sb, ++it.blkaddr, + S_ISDIR(inode->i_mode)); + if (IS_ERR(it.page)) { + kfree(vi->xattr_shared_xattrs); + vi->xattr_shared_xattrs = NULL; + ret = PTR_ERR(it.page); + goto out_unlock; + } + + it.kaddr = kmap_atomic(it.page); + atomic_map = true; + it.ofs = 0; + } + vi->xattr_shared_xattrs[i] = + le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); + it.ofs += sizeof(__le32); + } + xattr_iter_end(&it, atomic_map); + + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); + +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); + return ret; +} + +/* + * the general idea for these return values is + * if 0 is returned, go on processing the current xattr; + * 1 (> 0) is returned, skip this round to process the next xattr; + * -err (< 0) is returned, an error (maybe ENOXATTR) occurred + * and need to be handled + */ +struct xattr_iter_handlers { + int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); + int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); + int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); + void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); +}; + +static inline int xattr_iter_fixup(struct xattr_iter *it) +{ + if (it->ofs < EROFS_BLKSIZ) + return 0; + + xattr_iter_end(it, true); + + it->blkaddr += erofs_blknr(it->ofs); + + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); + if (IS_ERR(it->page)) { + int err = PTR_ERR(it->page); + + it->page = NULL; + return err; + } + + it->kaddr = kmap_atomic(it->page); + it->ofs = erofs_blkoff(it->ofs); + return 0; +} + +static int inline_xattr_iter_begin(struct xattr_iter *it, + struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); + unsigned int xattr_header_sz, inline_xattr_ofs; + + xattr_header_sz = inlinexattr_header_size(inode); + if (unlikely(xattr_header_sz >= vi->xattr_isize)) { + DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); + return -ENOATTR; + } + + inline_xattr_ofs = vi->inode_isize + xattr_header_sz; + + it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); + it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); + + it->page = erofs_get_inline_page(inode, it->blkaddr); + if (IS_ERR(it->page)) + return PTR_ERR(it->page); + + it->kaddr = kmap_atomic(it->page); + return vi->xattr_isize - xattr_header_sz; +} + +/* + * Regardless of success or failure, `xattr_foreach' will end up with + * `ofs' pointing to the next xattr item rather than an arbitrary position. + */ +static int xattr_foreach(struct xattr_iter *it, + const struct xattr_iter_handlers *op, + unsigned int *tlimit) +{ + struct erofs_xattr_entry entry; + unsigned int value_sz, processed, slice; + int err; + + /* 0. fixup blkaddr, ofs, ipage */ + err = xattr_iter_fixup(it); + if (err) + return err; + + /* + * 1. read xattr entry to the memory, + * since we do EROFS_XATTR_ALIGN + * therefore entry should be in the page + */ + entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); + if (tlimit) { + unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); + + /* xattr on-disk corruption: xattr entry beyond xattr_isize */ + if (unlikely(*tlimit < entry_sz)) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + *tlimit -= entry_sz; + } + + it->ofs += sizeof(struct erofs_xattr_entry); + value_sz = le16_to_cpu(entry.e_value_size); + + /* handle entry */ + err = op->entry(it, &entry); + if (err) { + it->ofs += entry.e_name_len + value_sz; + goto out; + } + + /* 2. handle xattr name (ofs will finally be at the end of name) */ + processed = 0; + + while (processed < entry.e_name_len) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + entry.e_name_len - processed); + + /* handle name */ + err = op->name(it, processed, it->kaddr + it->ofs, slice); + if (err) { + it->ofs += entry.e_name_len - processed + value_sz; + goto out; + } + + it->ofs += slice; + processed += slice; + } + + /* 3. handle xattr value */ + processed = 0; + + if (op->alloc_buffer) { + err = op->alloc_buffer(it, value_sz); + if (err) { + it->ofs += value_sz; + goto out; + } + } + + while (processed < value_sz) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + value_sz - processed); + op->value(it, processed, it->kaddr + it->ofs, slice); + it->ofs += slice; + processed += slice; + } + +out: + /* xattrs should be 4-byte aligned (on-disk constraint) */ + it->ofs = EROFS_XATTR_ALIGN(it->ofs); + return err < 0 ? err : 0; +} + +struct getxattr_iter { + struct xattr_iter it; + + char *buffer; + int buffer_size, index; + struct qstr name; +}; + +static int xattr_entrymatch(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return (it->index != entry->e_name_index || + it->name.len != entry->e_name_len) ? -ENOATTR : 0; +} + +static int xattr_namematch(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return memcmp(buf, it->name.name + processed, len) ? 
-ENOATTR : 0; +} + +static int xattr_checkbuffer(struct xattr_iter *_it, + unsigned int value_sz) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + int err = it->buffer_size < value_sz ? -ERANGE : 0; + + it->buffer_size = value_sz; + return !it->buffer ? 1 : err; +} + +static void xattr_copyvalue(struct xattr_iter *_it, + unsigned int processed, + char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + memcpy(it->buffer + processed, buf, len); +} + +static const struct xattr_iter_handlers find_xattr_handlers = { + .entry = xattr_entrymatch, + .name = xattr_namematch, + .alloc_buffer = xattr_checkbuffer, + .value = xattr_copyvalue +}; + +static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, inode); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); + if (ret != -ENOATTR) + break; + } + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_size; +} + +static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = -ENOATTR; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); + if (ret != -ENOATTR) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? 
ret : it->buffer_size; +} + +static bool erofs_xattr_user_list(struct dentry *dentry) +{ + return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); +} + +static bool erofs_xattr_trusted_list(struct dentry *dentry) +{ + return capable(CAP_SYS_ADMIN); +} + +int erofs_getxattr(struct inode *inode, int index, + const char *name, + void *buffer, size_t buffer_size) +{ + int ret; + struct getxattr_iter it; + + if (unlikely(!name)) + return -EINVAL; + + ret = init_inode_xattrs(inode); + if (ret) + return ret; + + it.index = index; + + it.name.len = strlen(name); + if (it.name.len > EROFS_NAME_LEN) + return -ERANGE; + it.name.name = name; + + it.buffer = buffer; + it.buffer_size = buffer_size; + + it.it.sb = inode->i_sb; + ret = inline_getxattr(inode, &it); + if (ret == -ENOATTR) + ret = shared_getxattr(inode, &it); + return ret; +} + +static int erofs_xattr_generic_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); + + switch (handler->flags) { + case EROFS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + break; + case EROFS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + break; + case EROFS_XATTR_INDEX_SECURITY: + break; + default: + return -EINVAL; + } + + return erofs_getxattr(inode, handler->flags, name, buffer, size); +} + +const struct xattr_handler erofs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = EROFS_XATTR_INDEX_USER, + .list = erofs_xattr_user_list, + .get = erofs_xattr_generic_get, +}; + +const struct xattr_handler erofs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .flags = EROFS_XATTR_INDEX_TRUSTED, + .list = erofs_xattr_trusted_list, + .get = erofs_xattr_generic_get, +}; + +#ifdef CONFIG_EROFS_FS_SECURITY +const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = EROFS_XATTR_INDEX_SECURITY, + .get = erofs_xattr_generic_get, +}; +#endif + +const struct xattr_handler *erofs_xattr_handlers[] = { + &erofs_xattr_user_handler, +#ifdef CONFIG_EROFS_FS_POSIX_ACL + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, +#endif + &erofs_xattr_trusted_handler, +#ifdef CONFIG_EROFS_FS_SECURITY + &erofs_xattr_security_handler, +#endif + NULL, +}; + +struct listxattr_iter { + struct xattr_iter it; + + struct dentry *dentry; + char *buffer; + int buffer_size, buffer_ofs; +}; + +static int xattr_entrylist(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + unsigned int prefix_len; + const char *prefix; + + const struct xattr_handler *h = + erofs_xattr_handler(entry->e_name_index); + + if (!h || (h->list && !h->list(it->dentry))) + return 1; + + prefix = xattr_prefix(h); + prefix_len = strlen(prefix); + + if (!it->buffer) { + it->buffer_ofs += prefix_len + entry->e_name_len + 1; + return 1; + } + + if (it->buffer_ofs + prefix_len + + entry->e_name_len + 1 > it->buffer_size) + return -ERANGE; + + memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); + it->buffer_ofs += prefix_len; + return 0; +} + +static int xattr_namelist(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + memcpy(it->buffer + it->buffer_ofs, buf, len); + it->buffer_ofs += len; + return 0; +} + +static int xattr_skipvalue(struct 
xattr_iter *_it, + unsigned int value_sz) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + it->buffer[it->buffer_ofs++] = '\0'; + return 1; +} + +static const struct xattr_iter_handlers list_xattr_handlers = { + .entry = xattr_entrylist, + .name = xattr_namelist, + .alloc_buffer = xattr_skipvalue, + .value = NULL +}; + +static int inline_listxattr(struct listxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); + if (ret) + break; + } + xattr_iter_end_final(&it->it); + return ret ? ret : it->buffer_ofs; +} + +static int shared_listxattr(struct listxattr_iter *it) +{ + struct inode *const inode = d_inode(it->dentry); + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = 0; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); + if (ret) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_ofs; +} + +ssize_t erofs_listxattr(struct dentry *dentry, + char *buffer, size_t buffer_size) +{ + int ret; + struct listxattr_iter it; + + ret = init_inode_xattrs(d_inode(dentry)); + if (ret) + return ret; + + it.dentry = dentry; + it.buffer = buffer; + it.buffer_size = buffer_size; + it.buffer_ofs = 0; + + it.it.sb = dentry->d_sb; + + ret = inline_listxattr(&it); + if (ret < 0 && ret != -ENOATTR) + return ret; + return shared_listxattr(&it); +} + +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int prefix, rc; + char *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + + rc = erofs_getxattr(inode, prefix, "", NULL, 0); + if (rc > 0) { + value = kmalloc(rc, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + rc = erofs_getxattr(inode, prefix, "", value, rc); + } + + if (rc == -ENOATTR) + acl = NULL; + else if (rc < 0) + acl = ERR_PTR(rc); + else + acl = posix_acl_from_xattr(&init_user_ns, value, rc); + kfree(value); + return acl; +} +#endif + diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h new file mode 100644 index 000000000000..c5ca47d814dd --- /dev/null +++ b/fs/erofs/xattr.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+/* Attribute not found */
+#define ENOATTR		ENODATA
+
+static inline unsigned int inlinexattr_header_size(struct inode *inode)
+{
+	return sizeof(struct erofs_xattr_ibody_header) +
+	       sizeof(u32) * EROFS_V(inode)->xattr_shared_count;
+}
+
+static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
+					  unsigned int xattr_id)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+	return sbi->xattr_blkaddr +
+	       xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
+#else
+	return 0;
+#endif
+}
+
+static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
+					     unsigned int xattr_id)
+{
+	return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
+}
+
+#ifdef CONFIG_EROFS_FS_XATTR
+extern const struct xattr_handler erofs_xattr_user_handler;
+extern const struct xattr_handler erofs_xattr_trusted_handler;
+#ifdef CONFIG_EROFS_FS_SECURITY
+extern const struct xattr_handler erofs_xattr_security_handler;
+#endif
+
+static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
+{
+	static const struct xattr_handler *xattr_handler_map[] = {
+		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+			&posix_acl_access_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+			&posix_acl_default_xattr_handler,
+#endif
+		[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+		[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+#endif
+	};
+
+	return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
+		xattr_handler_map[idx] : NULL;
+}
+
+extern const struct xattr_handler *erofs_xattr_handlers[];
+
+int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
+ssize_t erofs_listxattr(struct dentry *, char *, size_t);
+#else
+static inline int erofs_getxattr(struct inode *inode, int index,
+				 const char *name, void *buffer,
+				 size_t buffer_size)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline ssize_t erofs_listxattr(struct dentry *dentry,
+				      char *buffer, size_t buffer_size)
+{
+	return -EOPNOTSUPP;
+}
+#endif	/* !CONFIG_EROFS_FS_XATTR */
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type);
+#else
+#define erofs_get_acl	(NULL)
+#endif
+
+#endif	/* __EROFS_XATTR_H */
+
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
new file mode 100644
index 000000000000..b32ad585237c
--- /dev/null
+++ b/fs/erofs/zdata.c
@@ -0,0 +1,1432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "zdata.h"
+#include "compress.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+/*
+ * a compressed_pages[] placeholder, used to avoid the slots
+ * being filled with file pages for in-place decompression.
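+ * (PAGE_UNALLOCATED below is an arbitrary sentinel value; it only
+ * needs to never collide with a real page pointer or with NULL)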
+ */ +#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D) + +/* how to allocate cached pages for a pcluster */ +enum z_erofs_cache_alloctype { + DONTALLOC, /* don't allocate any cached pages */ + DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */ +}; + +/* + * tagged pointer with 1-bit tag for all compressed pages + * tag 0 - the page is just found with an extra page reference + */ +typedef tagptr1_t compressed_page_t; + +#define tag_compressed_page_justfound(page) \ + tagptr_fold(compressed_page_t, page, 1) + +static struct workqueue_struct *z_erofs_workqueue __read_mostly; +static struct kmem_cache *pcluster_cachep __read_mostly; + +void z_erofs_exit_zip_subsystem(void) +{ + destroy_workqueue(z_erofs_workqueue); + kmem_cache_destroy(pcluster_cachep); +} + +static inline int init_unzip_workqueue(void) +{ + const unsigned int onlinecpus = num_possible_cpus(); + const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; + + /* + * no need to spawn too many threads, limiting threads could minimum + * scheduling overhead, perhaps per-CPU threads should be better? + */ + z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, + onlinecpus + onlinecpus / 4); + return z_erofs_workqueue ? 0 : -ENOMEM; +} + +static void init_once(void *ptr) +{ + struct z_erofs_pcluster *pcl = ptr; + struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); + unsigned int i; + + mutex_init(&cl->lock); + cl->nr_pages = 0; + cl->vcnt = 0; + for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) + pcl->compressed_pages[i] = NULL; +} + +static void init_always(struct z_erofs_pcluster *pcl) +{ + struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); + + atomic_set(&pcl->obj.refcount, 1); + + DBG_BUGON(cl->nr_pages); + DBG_BUGON(cl->vcnt); +} + +int __init z_erofs_init_zip_subsystem(void) +{ + pcluster_cachep = kmem_cache_create("erofs_compress", + Z_EROFS_WORKGROUP_SIZE, 0, + SLAB_RECLAIM_ACCOUNT, init_once); + if (pcluster_cachep) { + if (!init_unzip_workqueue()) + return 0; + + kmem_cache_destroy(pcluster_cachep); + } + return -ENOMEM; +} + +enum z_erofs_collectmode { + COLLECT_SECONDARY, + COLLECT_PRIMARY, + /* + * The current collection was the tail of an exist chain, in addition + * that the previous processed chained collections are all decided to + * be hooked up to it. 
+ * A new chain will be created for the remaining collections which are + * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, + * the next collection cannot reuse the whole page safely in + * the following scenario: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (belongs to the next cl) | (belongs to the current cl) | + * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| + */ + COLLECT_PRIMARY_HOOKED, + COLLECT_PRIMARY_FOLLOWED_NOINPLACE, + /* + * The current collection has been linked with the owned chain, and + * could also be linked with the remaining collections, which means + * if the processing page is the tail page of the collection, thus + * the current collection can safely use the whole page (since + * the previous collection is under control) for in-place I/O, as + * illustrated below: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (of the current cl) | (of the previous collection) | + * | PRIMARY_FOLLOWED or | | + * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| + * + * [ (*) the above page can be used as inplace I/O. ] + */ + COLLECT_PRIMARY_FOLLOWED, +}; + +struct z_erofs_collector { + struct z_erofs_pagevec_ctor vector; + + struct z_erofs_pcluster *pcl, *tailpcl; + struct z_erofs_collection *cl; + struct page **compressedpages; + z_erofs_next_pcluster_t owned_head; + + enum z_erofs_collectmode mode; +}; + +struct z_erofs_decompress_frontend { + struct inode *const inode; + + struct z_erofs_collector clt; + struct erofs_map_blocks map; + + /* used for applying cache strategy on the fly */ + bool backmost; + erofs_off_t headoffset; +}; + +#define COLLECTOR_INIT() { \ + .owned_head = Z_EROFS_PCLUSTER_TAIL, \ + .mode = COLLECT_PRIMARY_FOLLOWED } + +#define DECOMPRESS_FRONTEND_INIT(__i) { \ + .inode = __i, .clt = COLLECTOR_INIT(), \ + .backmost = true, } + +static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; +static DEFINE_MUTEX(z_pagemap_global_lock); + +static void preload_compressed_pages(struct z_erofs_collector *clt, + struct address_space *mc, + enum z_erofs_cache_alloctype type, + struct list_head *pagepool) +{ + const struct z_erofs_pcluster *pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct page **pages = clt->compressedpages; + pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); + bool standalone = true; + + if (clt->mode < COLLECT_PRIMARY_FOLLOWED) + return; + + for (; pages < pcl->compressed_pages + clusterpages; ++pages) { + struct page *page; + compressed_page_t t; + + /* the compressed page was loaded before */ + if (READ_ONCE(*pages)) + continue; + + page = find_get_page(mc, index); + + if (page) { + t = tag_compressed_page_justfound(page); + } else if (type == DELAYEDALLOC) { + t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); + } else { /* DONTALLOC */ + if (standalone) + clt->compressedpages = pages; + standalone = false; + continue; + } + + if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) + continue; + + if (page) + put_page(page); + } + + if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ + clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; +} + +/* called by erofs_shrinker to get rid of all compressed_pages */ +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp) +{ + struct z_erofs_pcluster *const pcl = + container_of(grp, 
struct z_erofs_pcluster, obj); + struct address_space *const mapping = MNGD_MAPPING(sbi); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int i; + + /* + * refcount of workgroup is now freezed as 1, + * therefore no need to worry about available decompression users. + */ + for (i = 0; i < clusterpages; ++i) { + struct page *page = pcl->compressed_pages[i]; + + if (!page) + continue; + + /* block other users from reclaiming or migrating the page */ + if (!trylock_page(page)) + return -EBUSY; + + if (unlikely(page->mapping != mapping)) + continue; + + /* barrier is implied in the following 'unlock_page' */ + WRITE_ONCE(pcl->compressed_pages[i], NULL); + set_page_private(page, 0); + ClearPagePrivate(page); + + unlock_page(page); + put_page(page); + } + return 0; +} + +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = (void *)page_private(page); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int ret = 0; /* 0 - busy */ + + if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { + unsigned int i; + + for (i = 0; i < clusterpages; ++i) { + if (pcl->compressed_pages[i] == page) { + WRITE_ONCE(pcl->compressed_pages[i], NULL); + ret = 1; + break; + } + } + erofs_workgroup_unfreeze(&pcl->obj, 1); + + if (ret) { + ClearPagePrivate(page); + put_page(page); + } + } + return ret; +} + +/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ +static inline bool try_inplace_io(struct z_erofs_collector *clt, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + + while (clt->compressedpages < pcl->compressed_pages + clusterpages) { + if (!cmpxchg(clt->compressedpages++, NULL, page)) + return true; + } + return false; +} + +/* callers must be with collection lock held */ +static int z_erofs_attach_page(struct z_erofs_collector *clt, + struct page *page, + enum z_erofs_page_type type) +{ + int ret; + bool occupied; + + /* give priority for inplaceio */ + if (clt->mode >= COLLECT_PRIMARY && + type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && + try_inplace_io(clt, page)) + return 0; + + ret = z_erofs_pagevec_enqueue(&clt->vector, + page, type, &occupied); + clt->cl->vcnt += (unsigned int)ret; + + return ret ? 0 : -EAGAIN; +} + +static enum z_erofs_collectmode +try_to_claim_pcluster(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t *owned_head) +{ + /* let's claim these following types of pclusters */ +retry: + if (pcl->next == Z_EROFS_PCLUSTER_NIL) { + /* type 1, nil pcluster */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, + *owned_head) != Z_EROFS_PCLUSTER_NIL) + goto retry; + + *owned_head = &pcl->next; + /* lucky, I am the followee :) */ + return COLLECT_PRIMARY_FOLLOWED; + } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { + /* + * type 2, link to the end of a existing open chain, + * be careful that its submission itself is governed + * by the original owned chain. 
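+	 *
+	 * In both cases the claim itself is a single lockless cmpxchg
+	 * transition of ->next:
+	 *	NIL  -> *owned_head   (type 1: this chain becomes the owner)
+	 *	TAIL -> *owned_head   (type 2: hook onto an open chain end)
+	 * Any other value means the pcluster cannot be claimed here and
+	 * is handled as plain COLLECT_PRIMARY.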
+ */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, + *owned_head) != Z_EROFS_PCLUSTER_TAIL) + goto retry; + *owned_head = Z_EROFS_PCLUSTER_TAIL; + return COLLECT_PRIMARY_HOOKED; + } + return COLLECT_PRIMARY; /* :( better luck next time */ +} + +static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct erofs_workgroup *grp; + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + unsigned int length; + bool tag; + + grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); + if (!grp) + return NULL; + + pcl = container_of(grp, struct z_erofs_pcluster, obj); + if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + cl = z_erofs_primarycollection(pcl); + if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + length = READ_ONCE(pcl->length); + if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { + if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + } else { + unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; + + if (map->m_flags & EROFS_MAP_FULL_MAPPED) + llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; + + while (llen > length && + length != cmpxchg_relaxed(&pcl->length, length, llen)) { + cpu_relax(); + length = READ_ONCE(pcl->length); + } + } + mutex_lock(&cl->lock); + /* used to check tail merging loop due to corrupted images */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = pcl; + clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); + /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = NULL; + clt->pcl = pcl; + clt->cl = cl; + return cl; +} + +static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + int err; + + /* no available workgroup, let's allocate one */ + pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); + if (unlikely(!pcl)) + return ERR_PTR(-ENOMEM); + + init_always(pcl); + pcl->obj.index = map->m_pa >> PAGE_SHIFT; + + pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | + (map->m_flags & EROFS_MAP_FULL_MAPPED ? + Z_EROFS_PCLUSTER_FULL_LENGTH : 0); + + if (map->m_flags & EROFS_MAP_ZIPPED) + pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; + else + pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; + + pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; + pcl->clusterbits -= PAGE_SHIFT; + + /* new pclusters should be claimed as type 1, primary and followed */ + pcl->next = clt->owned_head; + clt->mode = COLLECT_PRIMARY_FOLLOWED; + + cl = z_erofs_primarycollection(pcl); + cl->pageofs = map->m_la & ~PAGE_MASK; + + /* + * lock all primary followed works before visible to others + * and mutex_trylock *never* fails for a new pcluster. 
+	 */
+	mutex_trylock(&cl->lock);
+
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	if (err) {
+		mutex_unlock(&cl->lock);
+		kmem_cache_free(pcluster_cachep, pcl);
+		return ERR_PTR(-EAGAIN);
+	}
+	/* used to check tail merging loop due to corrupted images */
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+		clt->tailpcl = pcl;
+	clt->owned_head = &pcl->next;
+	clt->pcl = pcl;
+	clt->cl = cl;
+	return cl;
+}
+
+static int z_erofs_collector_begin(struct z_erofs_collector *clt,
+				   struct inode *inode,
+				   struct erofs_map_blocks *map)
+{
+	struct z_erofs_collection *cl;
+
+	DBG_BUGON(clt->cl);
+
+	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
+	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
+	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+	if (!PAGE_ALIGNED(map->m_pa)) {
+		DBG_BUGON(1);
+		return -EINVAL;
+	}
+
+repeat:
+	cl = cllookup(clt, inode, map);
+	if (!cl) {
+		cl = clregister(clt, inode, map);
+
+		if (unlikely(cl == ERR_PTR(-EAGAIN)))
+			goto repeat;
+	}
+
+	if (IS_ERR(cl))
+		return PTR_ERR(cl);
+
+	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
+				  cl->pagevec, cl->vcnt);
+
+	clt->compressedpages = clt->pcl->compressed_pages;
+	if (clt->mode <= COLLECT_PRIMARY)	/* cannot do in-place I/O */
+		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
+	return 0;
+}
+
+/*
+ * keep in mind that referenced pclusters are freed
+ * only after an RCU grace period.
+ */
+static void z_erofs_rcu_callback(struct rcu_head *head)
+{
+	struct z_erofs_collection *const cl =
+		container_of(head, struct z_erofs_collection, rcu);
+
+	kmem_cache_free(pcluster_cachep,
+			container_of(cl, struct z_erofs_pcluster,
+				     primary_collection));
+}
+
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+{
+	struct z_erofs_pcluster *const pcl =
+		container_of(grp, struct z_erofs_pcluster, obj);
+	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
+
+	call_rcu(&cl->rcu, z_erofs_rcu_callback);
+}
+
+static void z_erofs_collection_put(struct z_erofs_collection *cl)
+{
+	struct z_erofs_pcluster *const pcl =
+		container_of(cl, struct z_erofs_pcluster, primary_collection);
+
+	erofs_workgroup_put(&pcl->obj);
+}
+
+static bool z_erofs_collector_end(struct z_erofs_collector *clt)
+{
+	struct z_erofs_collection *cl = clt->cl;
+
+	if (!cl)
+		return false;
+
+	z_erofs_pagevec_ctor_exit(&clt->vector, false);
+	mutex_unlock(&cl->lock);
+
+	/*
+	 * once all pending pages are added, drop the reference
+	 * unless the pcluster is hosted by ourselves.
+ */ + if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) + z_erofs_collection_put(cl); + + clt->cl = NULL; + return true; +} + +static inline struct page *__stagingpage_alloc(struct list_head *pagepool, + gfp_t gfp) +{ + struct page *page = erofs_allocpage(pagepool, gfp, true); + + page->mapping = Z_EROFS_MAPPING_STAGING; + return page; +} + +static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, + unsigned int cachestrategy, + erofs_off_t la) +{ + if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) + return false; + + if (fe->backmost) + return true; + + return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && + la < fe->headoffset; +} + +static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, + struct page *page, + struct list_head *pagepool) +{ + struct inode *const inode = fe->inode; + struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); + struct erofs_map_blocks *const map = &fe->map; + struct z_erofs_collector *const clt = &fe->clt; + const loff_t offset = page_offset(page); + bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); + + enum z_erofs_cache_alloctype cache_strategy; + enum z_erofs_page_type page_type; + unsigned int cur, end, spiltted, index; + int err = 0; + + /* register locked file pages as online pages in pack */ + z_erofs_onlinepage_init(page); + + spiltted = 0; + end = PAGE_SIZE; +repeat: + cur = end - 1; + + /* lucky, within the range of the current map_blocks */ + if (offset + cur >= map->m_la && + offset + cur < map->m_la + map->m_llen) { + /* didn't get a valid collection previously (very rare) */ + if (!clt->cl) + goto restart_now; + goto hitted; + } + + /* go ahead the next map_blocks */ + debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); + + if (z_erofs_collector_end(clt)) + fe->backmost = false; + + map->m_la = offset + cur; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (unlikely(err)) + goto err_out; + +restart_now: + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) + goto hitted; + + err = z_erofs_collector_begin(clt, inode, map); + if (unlikely(err)) + goto err_out; + + /* preload all compressed pages (maybe downgrade role if necessary) */ + if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) + cache_strategy = DELAYEDALLOC; + else + cache_strategy = DONTALLOC; + + preload_compressed_pages(clt, MNGD_MAPPING(sbi), + cache_strategy, pagepool); + + tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); +hitted: + cur = end - min_t(unsigned int, offset + end - map->m_la, end); + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, cur, end); + goto next_part; + } + + /* let's derive page type */ + page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : + (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : + (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); + + if (cur) + tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); + +retry: + err = z_erofs_attach_page(clt, page, page_type); + /* should allocate an additional staging page for pagevec */ + if (err == -EAGAIN) { + struct page *const newpage = + __stagingpage_alloc(pagepool, GFP_NOFS); + + err = z_erofs_attach_page(clt, newpage, + Z_EROFS_PAGE_TYPE_EXCLUSIVE); + if (likely(!err)) + goto retry; + } + + if (unlikely(err)) + goto err_out; + + index = page->index - (map->m_la >> PAGE_SHIFT); + + z_erofs_onlinepage_fixup(page, index, true); + + /* bump up the number of spiltted parts of a page */ + ++spiltted; + /* also update nr_pages */ + clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); +next_part: + /* can be used for verification */ + map->m_llen = offset + cur - map->m_la; + + end = cur; + if (end > 0) + goto repeat; + +out: + z_erofs_onlinepage_endio(page); + + debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", + __func__, page, spiltted, map->m_llen); + return err; + + /* if some error occurred while processing this page */ +err_out: + SetPageError(page); + goto out; +} + +static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) +{ + tagptr1_t t = tagptr_init(tagptr1_t, ptr); + struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); + bool background = tagptr_unfold_tags(t); + + if (!background) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); + return; + } + + if (!atomic_add_return(bios, &io->pending_bios)) + queue_work(z_erofs_workqueue, &io->u.work); +} + +static inline void z_erofs_vle_read_endio(struct bio *bio) +{ + struct erofs_sb_info *sbi = NULL; + blk_status_t err = bio->bi_status; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + bool cachemngd = false; + + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(!page->mapping); + + if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { + sbi = EROFS_SB(page->mapping->host->i_sb); + + if (time_to_inject(sbi, FAULT_READ_IO)) { + erofs_show_injection_info(FAULT_READ_IO); + err = BLK_STS_IOERR; + } + } + + /* sbi should already be gotten if the page is managed */ + if (sbi) + cachemngd = erofs_page_is_managed(sbi, page); + + if (unlikely(err)) + SetPageError(page); + else if (cachemngd) + SetPageUptodate(page); + + if (cachemngd) + unlock_page(page); + } + + z_erofs_vle_unzip_kickoff(bio->bi_private, -1); + bio_put(bio); +} + +static int z_erofs_decompress_pcluster(struct super_block *sb, + struct z_erofs_pcluster *pcl, + struct list_head *pagepool) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct z_erofs_pagevec_ctor ctor; + unsigned int i, outputsize, llen, nr_pages; + struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; + struct page **pages, **compressed_pages, *page; + + enum z_erofs_page_type page_type; + bool overlapped, partial; + struct z_erofs_collection *cl; + int err; + + might_sleep(); + cl = z_erofs_primarycollection(pcl); + DBG_BUGON(!READ_ONCE(cl->nr_pages)); + + mutex_lock(&cl->lock); + nr_pages = cl->nr_pages; + + if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { + pages = pages_onstack; + } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && + mutex_trylock(&z_pagemap_global_lock)) { + pages = 
z_pagemap_global;
+	} else {
+		gfp_t gfp_flags = GFP_KERNEL;
+
+		if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
+			gfp_flags |= __GFP_NOFAIL;
+
+		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+				       gfp_flags);
+
+		/* fallback to global pagemap for the lowmem scenario */
+		if (unlikely(!pages)) {
+			mutex_lock(&z_pagemap_global_lock);
+			pages = z_pagemap_global;
+		}
+	}
+
+	for (i = 0; i < nr_pages; ++i)
+		pages[i] = NULL;
+
+	err = 0;
+	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
+				  cl->pagevec, 0);
+
+	for (i = 0; i < cl->vcnt; ++i) {
+		unsigned int pagenr;
+
+		page = z_erofs_pagevec_dequeue(&ctor, &page_type);
+
+		/* all pages in pagevec ought to be valid */
+		DBG_BUGON(!page);
+		DBG_BUGON(!page->mapping);
+
+		if (z_erofs_put_stagingpage(pagepool, page))
+			continue;
+
+		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
+			pagenr = 0;
+		else
+			pagenr = z_erofs_onlinepage_index(page);
+
+		DBG_BUGON(pagenr >= nr_pages);
+
+		/*
+		 * currently EROFS doesn't support multiref (dedup),
+		 * so error out on multiref pages here.
+		 */
+		if (unlikely(pages[pagenr])) {
+			DBG_BUGON(1);
+			SetPageError(pages[pagenr]);
+			z_erofs_onlinepage_endio(pages[pagenr]);
+			err = -EFSCORRUPTED;
+		}
+		pages[pagenr] = page;
+	}
+	z_erofs_pagevec_ctor_exit(&ctor, true);
+
+	overlapped = false;
+	compressed_pages = pcl->compressed_pages;
+
+	for (i = 0; i < clusterpages; ++i) {
+		unsigned int pagenr;
+
+		page = compressed_pages[i];
+
+		/* all compressed pages ought to be valid */
+		DBG_BUGON(!page);
+		DBG_BUGON(!page->mapping);
+
+		if (!z_erofs_page_is_staging(page)) {
+			if (erofs_page_is_managed(sbi, page)) {
+				if (unlikely(!PageUptodate(page)))
+					err = -EIO;
+				continue;
+			}
+
+			/*
+			 * only non-head pages can be selected
+			 * for inplace decompression
+			 */
+			pagenr = z_erofs_onlinepage_index(page);
+
+			DBG_BUGON(pagenr >= nr_pages);
+			if (unlikely(pages[pagenr])) {
+				DBG_BUGON(1);
+				SetPageError(pages[pagenr]);
+				z_erofs_onlinepage_endio(pages[pagenr]);
+				err = -EFSCORRUPTED;
+			}
+			pages[pagenr] = page;
+
+			overlapped = true;
+		}
+
+		/* PG_error needs checking for in-place and staging pages */
+		if (unlikely(PageError(page))) {
+			DBG_BUGON(PageUptodate(page));
+			err = -EIO;
+		}
+	}
+
+	if (unlikely(err))
+		goto out;
+
+	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
+	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+		outputsize = llen;
+		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
+	} else {
+		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+		partial = true;
+	}
+
+	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
+					.sb = sb,
+					.in = compressed_pages,
+					.out = pages,
+					.pageofs_out = cl->pageofs,
+					.inputsize = PAGE_SIZE,
+					.outputsize = outputsize,
+					.alg = pcl->algorithmformat,
+					.inplace_io = overlapped,
+					.partial_decoding = partial
+				 }, pagepool);
+
+out:
+	/* must handle all compressed pages before ending pages */
+	for (i = 0; i < clusterpages; ++i) {
+		page = compressed_pages[i];
+
+		if (erofs_page_is_managed(sbi, page))
+			continue;
+
+		/* recycle all individual staging pages */
+		(void)z_erofs_put_stagingpage(pagepool, page);
+
+		WRITE_ONCE(compressed_pages[i], NULL);
+	}
+
+	for (i = 0; i < nr_pages; ++i) {
+		page = pages[i];
+		if (!page)
+			continue;
+
+		DBG_BUGON(!page->mapping);
+
+		/* recycle all individual staging pages */
+		if (z_erofs_put_stagingpage(pagepool, page))
+			continue;
+
+		if (unlikely(err < 0))
+			SetPageError(page);
+
+		z_erofs_onlinepage_endio(page);
+	}
+
+	if (pages == z_pagemap_global)
+		mutex_unlock(&z_pagemap_global_lock);
+	else if (unlikely(pages != pages_onstack))
+		kvfree(pages);
+
+	cl->nr_pages = 0;
+	cl->vcnt = 0;
+
+	/* all cl locks MUST be taken before the following line */
+	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
+
+	/* all cl locks SHOULD be released right now */
+	mutex_unlock(&cl->lock);
+
+	z_erofs_collection_put(cl);
+	return err;
+}
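To make the output sizing in z_erofs_decompress_pcluster() above concrete, here is a worked example with made-up numbers (not taken from the patch):

	/*
	 * Worked example (hypothetical values) for the sizing logic above:
	 * PAGE_SIZE = 4096, cl->pageofs = 512, nr_pages = 3, llen = 12000
	 *   nr_pages << PAGE_SHIFT = 12288 < 512 + 12000 = 12512
	 *   => outputsize = 12288 - 512 = 11776, partial = true
	 * Same pcluster with llen = 11000 instead:
	 *   12288 >= 512 + 11000 = 11512
	 *   => outputsize = 11000, partial derived from
	 *      Z_EROFS_PCLUSTER_FULL_LENGTH
	 */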
+
+static void z_erofs_vle_unzip_all(struct super_block *sb,
+				  struct z_erofs_unzip_io *io,
+				  struct list_head *pagepool)
+{
+	z_erofs_next_pcluster_t owned = io->head;
+
+	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
+		struct z_erofs_pcluster *pcl;
+
+		/* 'owned' can never equal Z_EROFS_PCLUSTER_TAIL here */
+		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
+
+		/* 'owned' can never be NULL */
+		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+
+		pcl = container_of(owned, struct z_erofs_pcluster, next);
+		owned = READ_ONCE(pcl->next);
+
+		z_erofs_decompress_pcluster(sb, pcl, pagepool);
+	}
+}
+
+static void z_erofs_vle_unzip_wq(struct work_struct *work)
+{
+	struct z_erofs_unzip_io_sb *iosb =
+		container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
+	LIST_HEAD(pagepool);
+
+	DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
+
+	put_pages_list(&pagepool);
+	kvfree(iosb);
+}
+
+static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
+					       unsigned int nr,
+					       struct list_head *pagepool,
+					       struct address_space *mc,
+					       gfp_t gfp)
+{
+	/* determined at compile time to avoid too many #ifdefs */
+	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
+	const pgoff_t index = pcl->obj.index;
+	bool tocache = false;
+
+	struct address_space *mapping;
+	struct page *oldpage, *page;
+
+	compressed_page_t t;
+	int justfound;
+
+repeat:
+	page = READ_ONCE(pcl->compressed_pages[nr]);
+	oldpage = page;
+
+	if (!page)
+		goto out_allocpage;
+
+	/*
+	 * the cached page has not been allocated and
+	 * a placeholder is out there, prepare it now.
+	 */
+	if (!nocache && page == PAGE_UNALLOCATED) {
+		tocache = true;
+		goto out_allocpage;
+	}
+
+	/* process the target tagged pointer */
+	t = tagptr_init(compressed_page_t, page);
+	justfound = tagptr_unfold_tags(t);
+	page = tagptr_unfold_ptr(t);
+
+	mapping = READ_ONCE(page->mapping);
+
+	/*
+	 * if managed cache is disabled, there is no way to
+	 * get such a cached-like page.
+	 */
+	if (nocache) {
+		/* `justfound' is impossible if managed cache is disabled */
+		DBG_BUGON(justfound);
+
+		/* and it should be locked, not uptodate, and not truncated */
+		DBG_BUGON(!PageLocked(page));
+		DBG_BUGON(PageUptodate(page));
+		DBG_BUGON(!mapping);
+		goto out;
+	}
+
+	/*
+	 * unmanaged (file) pages are all locked solidly,
+	 * therefore it is impossible for `mapping' to be NULL.
+	 */
+	if (mapping && mapping != mc)
+		/* ought to be unmanaged pages */
+		goto out;
+
+	lock_page(page);
+
+	/* only true if page reclaim goes wrong, should never happen */
+	DBG_BUGON(justfound && PagePrivate(page));
+
+	/* the page is still in managed cache */
+	if (page->mapping == mc) {
+		WRITE_ONCE(pcl->compressed_pages[nr], page);
+
+		ClearPageError(page);
+		if (!PagePrivate(page)) {
+			/*
+			 * !PagePrivate(page) should be impossible under
+			 * the current restriction if the page is
+			 * already in compressed_pages[].
+ */ + DBG_BUGON(!justfound); + + justfound = 0; + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); + } + + /* no need to submit io if it is already up-to-date */ + if (PageUptodate(page)) { + unlock_page(page); + page = NULL; + } + goto out; + } + + /* + * the managed page has been truncated, it's unsafe to + * reuse this one, let's allocate a new cache-managed page. + */ + DBG_BUGON(page->mapping); + DBG_BUGON(!justfound); + + tocache = true; + unlock_page(page); + put_page(page); +out_allocpage: + page = __stagingpage_alloc(pagepool, gfp); + if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { + list_add(&page->lru, pagepool); + cpu_relax(); + goto repeat; + } + if (nocache || !tocache) + goto out; + if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { + page->mapping = Z_EROFS_MAPPING_STAGING; + goto out; + } + + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); +out: /* the only exit (for tracing and debugging) */ + return page; +} + +static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, + struct z_erofs_unzip_io *io, + bool foreground) +{ + struct z_erofs_unzip_io_sb *iosb; + + if (foreground) { + /* waitqueue available for foreground io */ + DBG_BUGON(!io); + + init_waitqueue_head(&io->u.wait); + atomic_set(&io->pending_bios, 0); + goto out; + } + + iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); + DBG_BUGON(!iosb); + + /* initialize fields in the allocated descriptor */ + io = &iosb->io; + iosb->sb = sb; + INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); +out: + io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + return io; +} + +/* define decompression jobqueue types */ +enum { + JQ_BYPASS, + JQ_SUBMIT, + NR_JOBQUEUES, +}; + +static void *jobqueueset_init(struct super_block *sb, + z_erofs_next_pcluster_t qtail[], + struct z_erofs_unzip_io *q[], + struct z_erofs_unzip_io *fgq, + bool forcefg) +{ + /* + * if managed cache is enabled, bypass jobqueue is needed, + * no need to read from device for all pclusters in this queue. + */ + q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); + qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; + + q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); + qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; + + return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); +} + +static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t qtail[], + z_erofs_next_pcluster_t owned_head) +{ + z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; + z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; + + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + if (owned_head == Z_EROFS_PCLUSTER_TAIL) + owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); + + WRITE_ONCE(*submit_qtail, owned_head); + WRITE_ONCE(*bypass_qtail, &pcl->next); + + qtail[JQ_BYPASS] = &pcl->next; +} + +static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], + unsigned int nr_bios, + bool force_fg) +{ + /* + * although background is preferred, no one is pending for submission. + * don't issue workqueue for decompression but drop it directly instead. 
+	 */
+	if (force_fg || nr_bios)
+		return false;
+
+	kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
+	return true;
+}
+
+static bool z_erofs_vle_submit_all(struct super_block *sb,
+				   z_erofs_next_pcluster_t owned_head,
+				   struct list_head *pagepool,
+				   struct z_erofs_unzip_io *fgq,
+				   bool force_fg)
+{
+	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
+	struct bio *bio;
+	void *bi_private;
+	/* since bio will be NULL, no need to initialize last_index */
+	pgoff_t uninitialized_var(last_index);
+	bool force_submit = false;
+	unsigned int nr_bios;
+
+	if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL))
+		return false;
+
+	force_submit = false;
+	bio = NULL;
+	nr_bios = 0;
+	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
+
+	/* by default, all need I/O submission */
+	q[JQ_SUBMIT]->head = owned_head;
+
+	do {
+		struct z_erofs_pcluster *pcl;
+		unsigned int clusterpages;
+		pgoff_t first_index;
+		struct page *page;
+		unsigned int i = 0, bypass = 0;
+		int err;
+
+		/* 'owned_head' can never equal either of the following */
+		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+
+		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+
+		clusterpages = BIT(pcl->clusterbits);
+
+		/* close the main owned chain at first */
+		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+		first_index = pcl->obj.index;
+		force_submit |= (first_index != last_index + 1);
+
+repeat:
+		page = pickup_page_for_submission(pcl, i, pagepool,
+						  MNGD_MAPPING(sbi),
+						  GFP_NOFS);
+		if (!page) {
+			force_submit = true;
+			++bypass;
+			goto skippage;
+		}
+
+		if (bio && force_submit) {
+submit_bio_retry:
+			__submit_bio(bio, REQ_OP_READ, 0);
+			bio = NULL;
+		}
+
+		if (!bio) {
+			bio = erofs_grab_bio(sb, first_index + i,
+					     BIO_MAX_PAGES, bi_private,
+					     z_erofs_vle_read_endio, true);
+			++nr_bios;
+		}
+
+		err = bio_add_page(bio, page, PAGE_SIZE, 0);
+		if (err < PAGE_SIZE)
+			goto submit_bio_retry;
+
+		force_submit = false;
+		last_index = first_index + i;
+skippage:
+		if (++i < clusterpages)
+			goto repeat;
+
+		if (bypass < clusterpages)
+			qtail[JQ_SUBMIT] = &pcl->next;
+		else
+			move_to_bypass_jobqueue(pcl, qtail, owned_head);
+	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
+
+	if (bio)
+		__submit_bio(bio, REQ_OP_READ, 0);
+
+	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
+		return true;
+
+	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
+	return true;
+}
+
+static void z_erofs_submit_and_unzip(struct super_block *sb,
+				     struct z_erofs_collector *clt,
+				     struct list_head *pagepool,
+				     bool force_fg)
+{
+	struct z_erofs_unzip_io io[NR_JOBQUEUES];
+
+	if (!z_erofs_vle_submit_all(sb, clt->owned_head,
+				    pagepool, io, force_fg))
+		return;
+
+	/* decompress no-I/O pclusters immediately */
+	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
+
+	if (!force_fg)
+		return;
+
+	/* wait until all bios are completed */
+	wait_event(io[JQ_SUBMIT].u.wait,
+		   !atomic_read(&io[JQ_SUBMIT].pending_bios));
+
+	/* let's do synchronous decompression */
+	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
+}
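The chain surgery done by move_to_bypass_jobqueue() above is easier to see pictorially. With invented pcluster names, if P needs no read I/O (all of its compressed pages were already found in the managed cache), it is spliced out of the submission chain and appended to the bypass chain:

	/*
	 * Illustrative only (names A, P, B are made up):
	 *
	 * before:  submit chain:  A -> P -> B -> ... -> TAIL
	 *          bypass chain:  ... -> TAIL_CLOSED
	 *
	 * after:   submit chain:  A ------> B -> ... -> TAIL
	 *          bypass chain:  ... -> P -> TAIL_CLOSED
	 *
	 * qtail[] always points at the location holding each chain's
	 * terminator, so appending is a single WRITE_ONCE plus a
	 * tail-pointer update.
	 */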
+
+static int z_erofs_vle_normalaccess_readpage(struct file *file,
+					     struct page *page)
+{
+	struct inode *const inode = page->mapping->host;
+	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+	int err;
+	LIST_HEAD(pagepool);
+
+	trace_erofs_readpage(page, false);
+
+	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+
+	err = z_erofs_do_read_page(&f, page, &pagepool);
+	(void)z_erofs_collector_end(&f.clt);
+
+	/* if some compressed clusters are ready, submit them anyway */
+	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
+
+	if (err)
+		errln("%s, failed to read, err [%d]", __func__, err);
+
+	if (f.map.mpage)
+		put_page(f.map.mpage);
+
+	/* clean up the remaining free pages */
+	put_pages_list(&pagepool);
+	return err;
+}
+
+static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
+					    unsigned int nr)
+{
+	return nr <= sbi->max_sync_decompress_pages;
+}
+
+static int z_erofs_vle_normalaccess_readpages(struct file *filp,
+					      struct address_space *mapping,
+					      struct list_head *pages,
+					      unsigned int nr_pages)
+{
+	struct inode *const inode = mapping->host;
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+	bool sync = should_decompress_synchronously(sbi, nr_pages);
+	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+	struct page *head = NULL;
+	LIST_HEAD(pagepool);
+
+	trace_erofs_readpages(mapping->host, lru_to_page(pages),
+			      nr_pages, false);
+
+	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
+
+	for (; nr_pages; --nr_pages) {
+		struct page *page = lru_to_page(pages);
+
+		prefetchw(&page->flags);
+		list_del(&page->lru);
+
+		/*
+		 * A pure asynchronous readahead is indicated if
+		 * a PG_readahead marked page is hit first.
+		 * Let's also do asynchronous decompression for this case.
+		 */
+		sync &= !(PageReadahead(page) && !head);
+
+		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+			list_add(&page->lru, &pagepool);
+			continue;
+		}
+
+		set_page_private(page, (unsigned long)head);
+		head = page;
+	}
+
+	while (head) {
+		struct page *page = head;
+		int err;
+
+		/* traversal in reverse order */
+		head = (void *)page_private(page);
+
+		err = z_erofs_do_read_page(&f, page, &pagepool);
+		if (err) {
+			struct erofs_vnode *vi = EROFS_V(inode);
+
+			errln("%s, readahead error at page %lu of nid %llu",
+			      __func__, page->index, vi->nid);
+		}
+		put_page(page);
+	}
+
+	(void)z_erofs_collector_end(&f.clt);
+
+	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
+
+	if (f.map.mpage)
+		put_page(f.map.mpage);
+
+	/* clean up the remaining free pages */
+	put_pages_list(&pagepool);
+	return 0;
+}
+
+const struct address_space_operations z_erofs_vle_normalaccess_aops = {
+	.readpage = z_erofs_vle_normalaccess_readpage,
+	.readpages = z_erofs_vle_normalaccess_readpages,
+};
+
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
new file mode 100644
index 000000000000..4fc547bc01f9
--- /dev/null
+++ b/fs/erofs/zdata.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_FS_ZDATA_H
+#define __EROFS_FS_ZDATA_H
+
+#include "internal.h"
+#include "zpvec.h"
+
+#define Z_EROFS_NR_INLINE_PAGEVECS	3
+
+/*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Modifiable by initialization/destruction paths and read-only
+ *    for everyone else;
+ *
+ * L: Field should be protected by pageset lock;
+ *
+ * A: Field should be accessed / updated atomically for parallelized code.
+ */
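For instance, the three rules map onto fields of the structures below roughly as follows (an illustrative reading, mirroring the annotations in the definitions that follow):

	/*
	 * e.g. (illustrative only):
	 *   unsigned short pageofs;   // I: fixed while the collection is set up
	 *   unsigned int vcnt;        // L: only touched with cl->lock held
	 *   struct page *pages[N];    // A: updated via cmpxchg()/READ_ONCE()
	 */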
+struct z_erofs_collection {
+	struct mutex lock;
+
+	/* I: page offset of start position of decompression */
+	unsigned short pageofs;
+
+	/* L: maximum relative page index in pagevec[] */
+	unsigned short nr_pages;
+
+	/* L: total number of pages in pagevec[] */
+	unsigned int vcnt;
+
+	union {
+		/* L: inline a certain number of pagevecs for bootstrap */
+		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
+
+		/* I: can be used to free the pcluster by RCU. */
+		struct rcu_head rcu;
+	};
+};
+
+#define Z_EROFS_PCLUSTER_FULL_LENGTH	0x00000001
+#define Z_EROFS_PCLUSTER_LENGTH_BIT	1
+
+/*
+ * let's leave a type here in case of introducing
+ * another tagged pointer later.
+ */
+typedef void *z_erofs_next_pcluster_t;
+
+struct z_erofs_pcluster {
+	struct erofs_workgroup obj;
+	struct z_erofs_collection primary_collection;
+
+	/* A: point to next chained pcluster or TAILs */
+	z_erofs_next_pcluster_t next;
+
+	/* A: compressed pages (including multi-usage pages) */
+	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
+
+	/* A: lower limit of decompressed length and if full length or not */
+	unsigned int length;
+
+	/* I: compression algorithm format */
+	unsigned char algorithmformat;
+	/* I: bit shift of physical cluster size */
+	unsigned char clusterbits;
+};
+
+#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
+
+/* let's avoid the valid 32-bit kernel addresses */
+
+/* the chained workgroup hasn't submitted I/O (still open) */
+#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
+/* the chained workgroup has already submitted I/O */
+#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)
+
+#define Z_EROFS_PCLUSTER_NIL            (NULL)
+
+#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
+
+struct z_erofs_unzip_io {
+	atomic_t pending_bios;
+	z_erofs_next_pcluster_t head;
+
+	union {
+		wait_queue_head_t wait;
+		struct work_struct work;
+	} u;
+};
+
+struct z_erofs_unzip_io_sb {
+	struct z_erofs_unzip_io io;
+	struct super_block *sb;
+};
+
+#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+					 struct page *page)
+{
+	return page->mapping == MNGD_MAPPING(sbi);
+}
+
+#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
+#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
+#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)
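The helpers that follow pack a sub-page index and a small use count into one atomic word; a quick worked example of the encoding (made-up values):

	/*
	 * Worked example (hypothetical values): a page part at sub-index 5
	 * with 2 outstanding parts is stored as
	 *	(5 << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | 2 == (5 << 2) | 2 == 22
	 * z_erofs_onlinepage_endio() decrements the low count bits and only
	 * unlocks the page once they reach zero.
	 */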
+
+/*
+ * waiters (a.k.a. ongoing_packs): the number of outstanding parts that
+ * must complete before the page can be unlocked;
+ * sub-index: 0 for a partial page, >= 1 for the full-page sub-index
+ */
+typedef atomic_t z_erofs_onlinepage_t;
+
+/* type punning */
+union z_erofs_onlinepage_converter {
+	z_erofs_onlinepage_t *o;
+	unsigned long *v;
+};
+
+static inline unsigned int z_erofs_onlinepage_index(struct page *page)
+{
+	union z_erofs_onlinepage_converter u;
+
+	DBG_BUGON(!PagePrivate(page));
+	u.v = &page_private(page);
+
+	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+}
+
+static inline void z_erofs_onlinepage_init(struct page *page)
+{
+	union {
+		z_erofs_onlinepage_t o;
+		unsigned long v;
+	/* keep from being unlocked in advance */
+	} u = { .o = ATOMIC_INIT(1) };
+
+	set_page_private(page, u.v);
+	smp_wmb();
+	SetPagePrivate(page);
+}
+
+static inline void z_erofs_onlinepage_fixup(struct page *page,
+	uintptr_t index, bool down)
+{
+	unsigned long *p, o, v, id;
+repeat:
+	p = &page_private(page);
+	o = READ_ONCE(*p);
+
+	id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+	if (id) {
+		if (!index)
+			return;
+
+		DBG_BUGON(id != index);
+	}
+
+	v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+		((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+	if (cmpxchg(p, o, v) != o)
+		goto repeat;
+}
+
+static inline void z_erofs_onlinepage_endio(struct page *page)
+{
+	union z_erofs_onlinepage_converter u;
+	unsigned int v;
+
+	DBG_BUGON(!PagePrivate(page));
+	u.v = &page_private(page);
+
+	v = atomic_dec_return(u.o);
+	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+		ClearPagePrivate(page);
+		if (!PageError(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+	}
+	debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
+}
+
+#define Z_EROFS_VMAP_ONSTACK_PAGES \
+	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
+#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
+
+#endif
+
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
new file mode 100644
index 000000000000..4dc9cec01297
--- /dev/null
+++ b/fs/erofs/zmap.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018-2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "internal.h"
+#include <asm/unaligned.h>
+#include <trace/events/erofs.h>
+
+int z_erofs_fill_inode(struct inode *inode)
+{
+	struct erofs_vnode *const vi = EROFS_V(inode);
+
+	if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
+		vi->z_advise = 0;
+		vi->z_algorithmtype[0] = 0;
+		vi->z_algorithmtype[1] = 0;
+		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
+		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
+		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
+		set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
+	}
+
+	inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
+	return 0;
+}
+
+static int fill_inode_lazy(struct inode *inode)
+{
+	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct super_block *const sb = inode->i_sb;
+	int err;
+	erofs_off_t pos;
+	struct page *page;
+	void *kaddr;
+	struct z_erofs_map_header *h;
+
+	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
+		return 0;
+
+	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
+		return -ERESTARTSYS;
+
+	err = 0;
+	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
+		goto out_unlock;
+
+	DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
+
+	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+		    vi->xattr_isize, 8);
+	page = erofs_get_meta_page(sb, erofs_blknr(pos), false);
+	if (IS_ERR(page)) {
+		err = PTR_ERR(page);
+		goto out_unlock;
+	}
+
+	kaddr = kmap_atomic(page);
+
+	h = kaddr + erofs_blkoff(pos);
+	vi->z_advise = le16_to_cpu(h->h_advise);
+	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
+
+	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
+		errln("unknown compression format %u for nid %llu, please upgrade kernel",
+		      vi->z_algorithmtype[0], vi->nid);
+		err = -EOPNOTSUPP;
+		goto unmap_done;
+	}
+
+	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
+	vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
+					((h->h_clusterbits >> 3) & 3);
+
+	if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
+		errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
+		      vi->z_physical_clusterbits[0], vi->nid);
+		err = -EOPNOTSUPP;
+		goto unmap_done;
+	}
+
+	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+					((h->h_clusterbits >> 5) & 7);
+	set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
+unmap_done:
+	kunmap_atomic(kaddr);
+	unlock_page(page);
+	put_page(page);
+out_unlock:
+	clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
+	return err;
+}
+
+struct z_erofs_maprecorder {
+	struct inode *inode;
+	struct erofs_map_blocks *map;
+	void *kaddr;
+
+	unsigned long lcn;
+	/* compression extent information gathered */
+	u8  type;
+	u16 clusterofs;
+	u16 delta[2];
+	erofs_blk_t pblk;
+};
+
+static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
+				  erofs_blk_t eblk)
+{
+	struct super_block *const sb = m->inode->i_sb;
+	struct erofs_map_blocks *const map = m->map;
+	struct page *mpage = map->mpage;
+
+	if (mpage) {
+		if (mpage->index == eblk) {
+			if (!m->kaddr)
+				m->kaddr = kmap_atomic(mpage);
+			return 0;
+		}
+
+		if (m->kaddr) {
+			kunmap_atomic(m->kaddr);
+			m->kaddr = NULL;
+		}
+		put_page(mpage);
+	}
+
+	mpage = erofs_get_meta_page(sb, eblk, false);
+	if (IS_ERR(mpage)) {
+		map->mpage = NULL;
+		return PTR_ERR(mpage);
+	}
+	m->kaddr = kmap_atomic(mpage);
+	unlock_page(mpage);
+	map->mpage = mpage;
+	return 0;
+}
+
+static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+					     unsigned long lcn)
+{
+	struct inode *const inode =
m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); + const erofs_off_t pos = + Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + + vi->xattr_isize) + + lcn * sizeof(struct z_erofs_vle_decompressed_index); + struct z_erofs_vle_decompressed_index *di; + unsigned int advise, type; + int err; + + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + + m->lcn = lcn; + di = m->kaddr + erofs_blkoff(pos); + + advise = le16_to_cpu(di->di_advise); + type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & + ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); + switch (type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + m->clusterofs = 1 << vi->z_logical_clusterbits; + m->delta[0] = le16_to_cpu(di->di_u.delta[0]); + m->delta[1] = le16_to_cpu(di->di_u.delta[1]); + break; + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + m->clusterofs = le16_to_cpu(di->di_clusterofs); + m->pblk = le32_to_cpu(di->di_u.blkaddr); + break; + default: + DBG_BUGON(1); + return -EOPNOTSUPP; + } + m->type = type; + return 0; +} + +static unsigned int decode_compactedbits(unsigned int lobits, + unsigned int lomask, + u8 *in, unsigned int pos, u8 *type) +{ + const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); + const unsigned int lo = v & lomask; + + *type = (v >> lobits) & 3; + return lo; +} + +static int unpack_compacted_index(struct z_erofs_maprecorder *m, + unsigned int amortizedshift, + unsigned int eofs) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const unsigned int lomask = (1 << lclusterbits) - 1; + unsigned int vcnt, base, lo, encodebits, nblk; + int i; + u8 *in, type; + + if (1 << amortizedshift == 4) + vcnt = 2; + else if (1 << amortizedshift == 2 && lclusterbits == 12) + vcnt = 16; + else + return -EOPNOTSUPP; + + encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; + base = round_down(eofs, vcnt << amortizedshift); + in = m->kaddr + base; + + i = (eofs - base) >> amortizedshift; + + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + m->type = type; + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { + m->clusterofs = 1 << lclusterbits; + if (i + 1 != vcnt) { + m->delta[0] = lo; + return 0; + } + /* + * since the last lcluster in the pack is special, + * of which lo saves delta[1] rather than delta[0]. + * Hence, get delta[0] by the previous lcluster indirectly. 
+ */ + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * (i - 1), &type); + if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + lo = 0; + m->delta[0] = lo + 1; + return 0; + } + m->clusterofs = lo; + m->delta[0] = 0; + /* figout out blkaddr (pblk) for HEAD lclusters */ + nblk = 1; + while (i > 0) { + --i; + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + i -= lo; + + if (i >= 0) + ++nblk; + } + in += (vcnt << amortizedshift) - sizeof(__le32); + m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; + return 0; +} + +static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) +{ + struct inode *const inode = m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + + vi->inode_isize + vi->xattr_isize, 8) + + sizeof(struct z_erofs_map_header); + const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); + unsigned int compacted_4b_initial, compacted_2b; + unsigned int amortizedshift; + erofs_off_t pos; + int err; + + if (lclusterbits != 12) + return -EOPNOTSUPP; + + if (lcn >= totalidx) + return -EINVAL; + + m->lcn = lcn; + /* used to align to 32-byte (compacted_2b) alignment */ + compacted_4b_initial = (32 - ebase % 32) / 4; + if (compacted_4b_initial == 32 / 4) + compacted_4b_initial = 0; + + if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) + compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); + else + compacted_2b = 0; + + pos = ebase; + if (lcn < compacted_4b_initial) { + amortizedshift = 2; + goto out; + } + pos += compacted_4b_initial * 4; + lcn -= compacted_4b_initial; + + if (lcn < compacted_2b) { + amortizedshift = 1; + goto out; + } + pos += compacted_2b * 2; + lcn -= compacted_2b; + amortizedshift = 2; +out: + pos += lcn * (1 << amortizedshift); + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); +} + +static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned int lcn) +{ + const unsigned int datamode = EROFS_V(m->inode)->datamode; + + if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) + return vle_legacy_load_cluster_from_disk(m, lcn); + + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return compacted_load_cluster_from_disk(m, lcn); + + return -EINVAL; +} + +static int vle_extent_lookback(struct z_erofs_maprecorder *m, + unsigned int lookback_distance) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + struct erofs_map_blocks *const map = m->map; + const unsigned int lclusterbits = vi->z_logical_clusterbits; + unsigned long lcn = m->lcn; + int err; + + if (lcn < lookback_distance) { + errln("bogus lookback distance @ nid %llu", vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* load extent head logical cluster if needed */ + lcn -= lookback_distance; + err = vle_load_cluster_from_disk(m, lcn); + if (err) + return err; + + switch (m->type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + if (unlikely(!m->delta[0])) { + errln("invalid lookback distance 0 at nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + return vle_extent_lookback(m, m->delta[0]); + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + map->m_la = (lcn << lclusterbits) | m->clusterofs; + break; + default: + 
errln("unknown type %u at lcn %lu of nid %llu", + m->type, lcn, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + return 0; +} + +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct z_erofs_maprecorder m = { + .inode = inode, + .map = map, + }; + int err = 0; + unsigned int lclusterbits, endoff; + unsigned long long ofs, end; + + trace_z_erofs_map_blocks_iter_enter(inode, map, flags); + + /* when trying to read beyond EOF, leave it unmapped */ + if (unlikely(map->m_la >= inode->i_size)) { + map->m_llen = map->m_la + 1 - inode->i_size; + map->m_la = inode->i_size; + map->m_flags = 0; + goto out; + } + + err = fill_inode_lazy(inode); + if (err) + goto out; + + lclusterbits = vi->z_logical_clusterbits; + ofs = map->m_la; + m.lcn = ofs >> lclusterbits; + endoff = ofs & ((1 << lclusterbits) - 1); + + err = vle_load_cluster_from_disk(&m, m.lcn); + if (err) + goto unmap_out; + + map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ + end = (m.lcn + 1ULL) << lclusterbits; + + switch (m.type) { + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + if (endoff >= m.clusterofs) + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + if (endoff >= m.clusterofs) { + map->m_la = (m.lcn << lclusterbits) | m.clusterofs; + break; + } + /* m.lcn should be >= 1 if endoff < m.clusterofs */ + if (unlikely(!m.lcn)) { + errln("invalid logical cluster 0 at nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_out; + } + end = (m.lcn << lclusterbits) | m.clusterofs; + map->m_flags |= EROFS_MAP_FULL_MAPPED; + m.delta[0] = 1; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + /* get the correspoinding first chunk */ + err = vle_extent_lookback(&m, m.delta[0]); + if (unlikely(err)) + goto unmap_out; + break; + default: + errln("unknown type %u at offset %llu of nid %llu", + m.type, ofs, vi->nid); + err = -EOPNOTSUPP; + goto unmap_out; + } + + map->m_llen = end - map->m_la; + map->m_plen = 1 << lclusterbits; + map->m_pa = blknr_to_addr(m.pblk); + map->m_flags |= EROFS_MAP_MAPPED; + +unmap_out: + if (m.kaddr) + kunmap_atomic(m.kaddr); + +out: + debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", + __func__, map->m_la, map->m_pa, + map->m_llen, map->m_plen, map->m_flags); + + trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); + + /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ + DBG_BUGON(err < 0 && err != -ENOMEM); + return err; +} + diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h new file mode 100644 index 000000000000..bd3cee16491c --- /dev/null +++ b/fs/erofs/zpvec.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_ZPVEC_H +#define __EROFS_FS_ZPVEC_H + +#include "tagptr.h" + +/* page type in pagevec for decompress subsystem */ +enum z_erofs_page_type { + /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ + Z_EROFS_PAGE_TYPE_EXCLUSIVE, + + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, + + Z_EROFS_VLE_PAGE_TYPE_HEAD, + Z_EROFS_VLE_PAGE_TYPE_MAX +}; + +extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") + __bad_page_type_exclusive(void); + +/* pagevec tagged pointer */ +typedef tagptr2_t erofs_vtptr_t; + +/* pagevec collector */ +struct z_erofs_pagevec_ctor { + struct page *curr, *next; + erofs_vtptr_t *pages; + + unsigned int nr, index; +}; + +static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + if (!ctor->curr) + return; + + if (atomic) + kunmap_atomic(ctor->pages); + else + kunmap(ctor->curr); +} + +static inline struct page * +z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr) +{ + unsigned int index; + + /* keep away from occupied pages */ + if (ctor->next) + return ctor->next; + + for (index = 0; index < nr; ++index) { + const erofs_vtptr_t t = ctor->pages[index]; + const unsigned int tags = tagptr_unfold_tags(t); + + if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) + return tagptr_unfold_ptr(t); + } + DBG_BUGON(nr >= ctor->nr); + return NULL; +} + +static inline void +z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); + + z_erofs_pagevec_ctor_exit(ctor, atomic); + + ctor->curr = next; + ctor->next = NULL; + ctor->pages = atomic ? + kmap_atomic(ctor->curr) : kmap(ctor->curr); + + ctor->nr = PAGE_SIZE / sizeof(struct page *); + ctor->index = 0; +} + +static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr, + erofs_vtptr_t *pages, + unsigned int i) +{ + ctor->nr = nr; + ctor->curr = ctor->next = NULL; + ctor->pages = pages; + + if (i >= nr) { + i -= nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + while (i > ctor->nr) { + i -= ctor->nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + } + } + ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); + ctor->index = i; +} + +static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, + struct page *page, + enum z_erofs_page_type type, + bool *occupied) +{ + *occupied = false; + if (unlikely(!ctor->next && type)) + if (ctor->index + 1 == ctor->nr) + return false; + + if (unlikely(ctor->index >= ctor->nr)) + z_erofs_pagevec_ctor_pagedown(ctor, false); + + /* exclusive page type must be 0 */ + if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) + __bad_page_type_exclusive(); + + /* should remind that collector->next never equal to 1, 2 */ + if (type == (uintptr_t)ctor->next) { + ctor->next = page; + *occupied = true; + } + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); + return true; +} + +static inline struct page * +z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, + enum z_erofs_page_type *type) +{ + erofs_vtptr_t t; + + if (unlikely(ctor->index >= ctor->nr)) { + DBG_BUGON(!ctor->next); + z_erofs_pagevec_ctor_pagedown(ctor, true); + } + + t = ctor->pages[ctor->index]; + + *type = tagptr_unfold_tags(t); + + /* should remind that collector->next never equal to 1, 2 */ + if (*type == (uintptr_t)ctor->next) + ctor->next = tagptr_unfold_ptr(t); + + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); + 
return tagptr_unfold_ptr(t); +} +#endif + diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h new file mode 100644 index 000000000000..bfb2da9c4eee --- /dev/null +++ b/include/trace/events/erofs.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM erofs + +#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EROFS_H + +#include + +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_nid(entry) show_dev(entry->dev), entry->nid + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_map_flags(flags) __print_flags(flags, "|", \ + { EROFS_GET_BLOCKS_RAW, "RAW" }) + +#define show_mflags(flags) __print_flags(flags, "", \ + { EROFS_MAP_MAPPED, "M" }, \ + { EROFS_MAP_META, "I" }, \ + { EROFS_MAP_ZIPPED, "Z" }) + +TRACE_EVENT(erofs_lookup, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(const char *, name ) + __field(unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->nid = EROFS_V(dir)->nid; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", + show_dev_nid(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(erofs_fill_inode, + TP_PROTO(struct inode *inode, int isdir), + TP_ARGS(inode, isdir), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(erofs_blk_t, blkaddr ) + __field(unsigned int, ofs ) + __field(int, isdir ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->isdir = isdir; + ), + + TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", + show_dev_nid(__entry), + __entry->blkaddr, __entry->ofs, + __entry->isdir) +); + +TRACE_EVENT(erofs_readpage, + + TP_PROTO(struct page *page, bool raw), + + TP_ARGS(page, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(int, dir ) + __field(pgoff_t, index ) + __field(int, uptodate) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->nid = EROFS_V(page->mapping->host)->nid; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->uptodate = PageUptodate(page); + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " + "raw = %d", + show_dev_nid(__entry), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->uptodate, + __entry->raw) +); + +TRACE_EVENT(erofs_readpages, + + TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, + bool raw), + + TP_ARGS(inode, page, nrpage, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(pgoff_t, start ) + __field(unsigned int, nrpage ) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->start = page->index; + __entry->nrpage = nrpage; + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", + show_dev_nid(__entry), + (unsigned long)__entry->start, + 
__entry->nrpage, + __entry->raw) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( erofs_off_t, la ) + __field( u64, llen ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->la = map->m_la; + __entry->llen = map->m_llen; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", + show_dev_nid(__entry), + __entry->la, __entry->llen, + __entry->flags ? show_map_flags(__entry->flags) : "NULL") +); + +DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags), + + TP_ARGS(inode, map, flags) +); + +DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( unsigned int, flags ) + __field( erofs_off_t, la ) + __field( erofs_off_t, pa ) + __field( u64, llen ) + __field( u64, plen ) + __field( unsigned int, mflags ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->flags = flags; + __entry->la = map->m_la; + __entry->pa = map->m_pa; + __entry->llen = map->m_llen; + __entry->plen = map->m_plen; + __entry->mflags = map->m_flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), nid = %llu, flags %s " + "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", + show_dev_nid(__entry), + __entry->flags ? 
show_map_flags(__entry->flags) : "NULL",
+		  __entry->la, __entry->pa, __entry->llen, __entry->plen,
+		  show_mflags(__entry->mflags), __entry->ret)
+);
+
+DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
+	TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+		 unsigned flags, int ret),
+
+	TP_ARGS(inode, map, flags, ret)
+);
+
+DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
+	TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+		 unsigned int flags, int ret),
+
+	TP_ARGS(inode, map, flags, ret)
+);
+
+TRACE_EVENT(erofs_destroy_inode,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,		dev	)
+		__field(	erofs_nid_t,	nid	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->nid	= EROFS_V(inode)->nid;
+	),
+
+	TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
+);
+
+#endif /* _TRACE_EROFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 1274c692e59c..903cc2d2750b 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -19,6 +19,7 @@
 #define SQUASHFS_MAGIC		0x73717368
 #define ECRYPTFS_SUPER_MAGIC	0xf15f
 #define EFS_SUPER_MAGIC		0x414A53
+#define EROFS_SUPER_MAGIC_V1	0xE0F5E1E2
 #define EXT2_SUPER_MAGIC	0xEF53
 #define EXT3_SUPER_MAGIC	0xEF53
 #define XENFS_SUPER_MAGIC	0xabba1974
--
cgit v1.2.3-71-gd317


From a1db98f20b818edf15f4ad70072f771b6f185949 Mon Sep 17 00:00:00 2001
From: Gao Xiang
Date: Mon, 26 Aug 2019 21:26:53 +0800
Subject: erofs: fix compile warnings when moving out include/trace/events/erofs.h

As Stephen reported [1], many compile warnings are raised when moving
out include/trace/events/erofs.h:

In file included from include/trace/events/erofs.h:8,
                 from <command-line>:
include/trace/events/erofs.h:28:37: warning: 'struct dentry' declared
inside parameter list will not be visible outside of this definition
or declaration
  TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
                                      ^~~~~~
include/linux/tracepoint.h:233:34: note: in definition of macro '__DECLARE_TRACE'
  static inline void trace_##name(proto)   \
                                  ^~~~~
include/linux/tracepoint.h:396:24: note: in expansion of macro 'PARAMS'
   __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),  \
                         ^~~~~~
include/linux/tracepoint.h:532:2: note: in expansion of macro 'DECLARE_TRACE'
  DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
  ^~~~~~~~~~~~~
include/linux/tracepoint.h:532:22: note: in expansion of macro 'PARAMS'
  DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
                      ^~~~~~
include/trace/events/erofs.h:26:1: note: in expansion of macro 'TRACE_EVENT'
 TRACE_EVENT(erofs_lookup,
 ^~~~~~~~~~~
include/trace/events/erofs.h:28:2: note: in expansion of macro 'TP_PROTO'
  TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
  ^~~~~~~~

That made me very confused, since most of the original EROFS tracepoint
code was taken from f2fs; I finally found commit 43c78d88036e ("kbuild:
compile-test kernel headers to ensure they are self-contained").

It seems these warnings are generated by the KERNEL_HEADER_TEST feature,
and the ext4/f2fs tracepoint files were in its blacklist. Anyway, let's
fix these issues for the KERNEL_HEADER_TEST feature instead of adding
them to the blacklist...
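For reference, the rule being enforced is simply that each header must compile standalone; a self-contained tracepoint header looks roughly like the sketch below (a hypothetical "foo" subsystem, assuming only <linux/tracepoint.h> plus whatever header supplies the types used in TP_PROTO):

	/* hypothetical self-contained trace header skeleton */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FOO_H

	#include <linux/tracepoint.h>
	#include <linux/fs.h>	/* complete types used by the prototypes */

	struct foo_private;	/* a forward declaration suffices for pointers */

	TRACE_EVENT(foo_event,
		TP_PROTO(struct inode *inode, struct foo_private *p),
		TP_ARGS(inode, p),
		TP_STRUCT__entry(__field(dev_t, dev)),
		TP_fast_assign(__entry->dev = inode->i_sb->s_dev;),
		TP_printk("dev = (%d,%d)",
			  MAJOR(__entry->dev), MINOR(__entry->dev))
	);
	#endif /* _TRACE_FOO_H */

	/* this part must be outside protection */
	#include <trace/define_trace.h>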
[1] https://lore.kernel.org/lkml/20190826162432.11100665@canb.auug.org.au/ Reported-by: Stephen Rothwell Signed-off-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20190826132653.100731-1-gaoxiang25@huawei.com Signed-off-by: Greg Kroah-Hartman --- include/trace/events/erofs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index bfb2da9c4eee..d239f39cbc8c 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -6,6 +6,9 @@ #define _TRACE_EROFS_H #include +#include + +struct erofs_map_blocks; #define show_dev(dev) MAJOR(dev), MINOR(dev) #define show_dev_nid(entry) show_dev(entry->dev), entry->nid -- cgit v1.2.3-71-gd317 From ec0ad868173da8a75121f9dc116a5d5478ff614d Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 25 Aug 2019 07:54:27 +0200 Subject: staging: greybus: move core include files to include/linux/greybus/ With the goal of moving the core of the greybus code out of staging, the include files need to be moved to include/linux/greybus.h and include/linux/greybus/ Cc: Vaibhav Hiremath Cc: Johan Hovold Cc: Vaibhav Agarwal Cc: Rui Miguel Silva Cc: David Lin Cc: "Bryan O'Donoghue" Cc: greybus-dev@lists.linaro.org Cc: devel@driverdev.osuosl.org Acked-by: Mark Greer Acked-by: Viresh Kumar Acked-by: Alex Elder Link: https://lore.kernel.org/r/20190825055429.18547-8-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/arche-platform.c | 2 +- drivers/staging/greybus/audio_apbridgea.c | 3 +- drivers/staging/greybus/audio_codec.h | 4 +- drivers/staging/greybus/audio_gb.c | 4 +- drivers/staging/greybus/authentication.c | 3 +- drivers/staging/greybus/bootrom.c | 2 +- drivers/staging/greybus/bundle.c | 2 +- drivers/staging/greybus/bundle.h | 89 -- drivers/staging/greybus/camera.c | 2 +- drivers/staging/greybus/connection.c | 2 +- drivers/staging/greybus/connection.h | 128 -- drivers/staging/greybus/control.c | 2 +- drivers/staging/greybus/control.h | 57 - drivers/staging/greybus/core.c | 2 +- drivers/staging/greybus/debugfs.c | 3 +- drivers/staging/greybus/es2.c | 3 +- drivers/staging/greybus/firmware.h | 2 +- drivers/staging/greybus/fw-core.c | 2 +- drivers/staging/greybus/fw-download.c | 2 +- drivers/staging/greybus/fw-management.c | 2 +- drivers/staging/greybus/gbphy.c | 2 +- drivers/staging/greybus/gpio.c | 2 +- drivers/staging/greybus/greybus.h | 152 -- drivers/staging/greybus/greybus_id.h | 27 - drivers/staging/greybus/greybus_manifest.h | 178 --- drivers/staging/greybus/greybus_protocols.h | 2176 --------------------------- drivers/staging/greybus/hd.c | 2 +- drivers/staging/greybus/hd.h | 82 - drivers/staging/greybus/hid.c | 3 +- drivers/staging/greybus/i2c.c | 2 +- drivers/staging/greybus/interface.c | 2 +- drivers/staging/greybus/interface.h | 82 - drivers/staging/greybus/light.c | 4 +- drivers/staging/greybus/log.c | 3 +- drivers/staging/greybus/loopback.c | 5 +- drivers/staging/greybus/manifest.c | 2 +- drivers/staging/greybus/manifest.h | 15 - drivers/staging/greybus/module.c | 2 +- drivers/staging/greybus/module.h | 33 - drivers/staging/greybus/operation.c | 2 +- drivers/staging/greybus/operation.h | 224 --- drivers/staging/greybus/power_supply.c | 3 +- drivers/staging/greybus/pwm.c | 2 +- drivers/staging/greybus/raw.c | 3 +- drivers/staging/greybus/sdio.c | 2 +- drivers/staging/greybus/spi.c | 2 +- drivers/staging/greybus/spilib.c | 2 +- drivers/staging/greybus/svc.c | 3 +- drivers/staging/greybus/svc.h | 
101 -- drivers/staging/greybus/svc_watchdog.c | 2 +- drivers/staging/greybus/uart.c | 2 +- drivers/staging/greybus/usb.c | 2 +- drivers/staging/greybus/vibrator.c | 3 +- include/linux/greybus.h | 152 ++ include/linux/greybus/bundle.h | 89 ++ include/linux/greybus/connection.h | 128 ++ include/linux/greybus/control.h | 57 + include/linux/greybus/greybus_id.h | 27 + include/linux/greybus/greybus_manifest.h | 178 +++ include/linux/greybus/greybus_protocols.h | 2176 +++++++++++++++++++++++++++ include/linux/greybus/hd.h | 82 + include/linux/greybus/interface.h | 82 + include/linux/greybus/manifest.h | 15 + include/linux/greybus/module.h | 33 + include/linux/greybus/operation.h | 224 +++ include/linux/greybus/svc.h | 101 ++ 66 files changed, 3384 insertions(+), 3403 deletions(-) delete mode 100644 drivers/staging/greybus/bundle.h delete mode 100644 drivers/staging/greybus/connection.h delete mode 100644 drivers/staging/greybus/control.h delete mode 100644 drivers/staging/greybus/greybus.h delete mode 100644 drivers/staging/greybus/greybus_id.h delete mode 100644 drivers/staging/greybus/greybus_manifest.h delete mode 100644 drivers/staging/greybus/greybus_protocols.h delete mode 100644 drivers/staging/greybus/hd.h delete mode 100644 drivers/staging/greybus/interface.h delete mode 100644 drivers/staging/greybus/manifest.h delete mode 100644 drivers/staging/greybus/module.h delete mode 100644 drivers/staging/greybus/operation.h delete mode 100644 drivers/staging/greybus/svc.h create mode 100644 include/linux/greybus.h create mode 100644 include/linux/greybus/bundle.h create mode 100644 include/linux/greybus/connection.h create mode 100644 include/linux/greybus/control.h create mode 100644 include/linux/greybus/greybus_id.h create mode 100644 include/linux/greybus/greybus_manifest.h create mode 100644 include/linux/greybus/greybus_protocols.h create mode 100644 include/linux/greybus/hd.h create mode 100644 include/linux/greybus/interface.h create mode 100644 include/linux/greybus/manifest.h create mode 100644 include/linux/greybus/module.h create mode 100644 include/linux/greybus/operation.h create mode 100644 include/linux/greybus/svc.h (limited to 'include') diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index 6eb842040c22..eebf0deb39f5 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -19,8 +19,8 @@ #include #include #include +#include #include "arche_platform.h" -#include "greybus.h" #if IS_ENABLED(CONFIG_USB_HSIC_USB3613) #include diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c index 7ebb1bde5cb7..26117e390deb 100644 --- a/drivers/staging/greybus/audio_apbridgea.c +++ b/drivers/staging/greybus/audio_apbridgea.c @@ -5,8 +5,7 @@ * Copyright 2015-2016 Google Inc. 
*/ -#include "greybus.h" -#include "greybus_protocols.h" +#include #include "audio_apbridgea.h" #include "audio_codec.h" diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h index 9ba09ea9c2fc..cb5d271da1a5 100644 --- a/drivers/staging/greybus/audio_codec.h +++ b/drivers/staging/greybus/audio_codec.h @@ -8,12 +8,10 @@ #ifndef __LINUX_GBAUDIO_CODEC_H #define __LINUX_GBAUDIO_CODEC_H +#include #include #include -#include "greybus.h" -#include "greybus_protocols.h" - #define NAME_SIZE 32 #define MAX_DAIS 2 /* APB1, APB2 */ diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c index 8894f1c87d48..9d8994fdb41a 100644 --- a/drivers/staging/greybus/audio_gb.c +++ b/drivers/staging/greybus/audio_gb.c @@ -5,9 +5,7 @@ * Copyright 2015-2016 Google Inc. */ -#include "greybus.h" -#include "greybus_protocols.h" -#include "operation.h" +#include #include "audio_codec.h" /* TODO: Split into separate calls */ diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c index a5d7c53df987..297e69f011c7 100644 --- a/drivers/staging/greybus/authentication.c +++ b/drivers/staging/greybus/authentication.c @@ -6,8 +6,7 @@ * Copyright 2016 Linaro Ltd. */ -#include "greybus.h" - +#include #include #include #include diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c index 402e6360834f..a8efb86de140 100644 --- a/drivers/staging/greybus/bootrom.c +++ b/drivers/staging/greybus/bootrom.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include "greybus.h" #include "firmware.h" /* Timeout, in jiffies, within which the next request must be received */ diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c index 3f702db9e098..84660729538b 100644 --- a/drivers/staging/greybus/bundle.c +++ b/drivers/staging/greybus/bundle.c @@ -6,7 +6,7 @@ * Copyright 2014-2015 Linaro Ltd. */ -#include "greybus.h" +#include #include "greybus_trace.h" static ssize_t bundle_class_show(struct device *dev, diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h deleted file mode 100644 index 8734d2055657..000000000000 --- a/drivers/staging/greybus/bundle.h +++ /dev/null @@ -1,89 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus bundles - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. 
- */ - -#ifndef __BUNDLE_H -#define __BUNDLE_H - -#include - -#define BUNDLE_ID_NONE U8_MAX - -/* Greybus "public" definitions" */ -struct gb_bundle { - struct device dev; - struct gb_interface *intf; - - u8 id; - u8 class; - u8 class_major; - u8 class_minor; - - size_t num_cports; - struct greybus_descriptor_cport *cport_desc; - - struct list_head connections; - u8 *state; - - struct list_head links; /* interface->bundles */ -}; -#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev) - -/* Greybus "private" definitions" */ -struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, - u8 class); -int gb_bundle_add(struct gb_bundle *bundle); -void gb_bundle_destroy(struct gb_bundle *bundle); - -/* Bundle Runtime PM wrappers */ -#ifdef CONFIG_PM -static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) -{ - int retval; - - retval = pm_runtime_get_sync(&bundle->dev); - if (retval < 0) { - dev_err(&bundle->dev, - "pm_runtime_get_sync failed: %d\n", retval); - pm_runtime_put_noidle(&bundle->dev); - return retval; - } - - return 0; -} - -static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) -{ - int retval; - - pm_runtime_mark_last_busy(&bundle->dev); - retval = pm_runtime_put_autosuspend(&bundle->dev); - - return retval; -} - -static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) -{ - pm_runtime_get_noresume(&bundle->dev); -} - -static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) -{ - pm_runtime_put_noidle(&bundle->dev); -} - -#else -static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) -{ return 0; } -static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) -{ return 0; } - -static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {} -static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {} -#endif - -#endif /* __BUNDLE_H */ diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c index 615c8e7fd51e..b570e13394ac 100644 --- a/drivers/staging/greybus/camera.c +++ b/drivers/staging/greybus/camera.c @@ -14,9 +14,9 @@ #include #include #include +#include #include "gb-camera.h" -#include "greybus.h" #include "greybus_protocols.h" enum gb_camera_debugs_buffer_id { diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c index eda964208cce..fc8f57f97ce6 100644 --- a/drivers/staging/greybus/connection.c +++ b/drivers/staging/greybus/connection.c @@ -7,8 +7,8 @@ */ #include +#include -#include "greybus.h" #include "greybus_trace.h" #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000 diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h deleted file mode 100644 index 5ca3befc0636..000000000000 --- a/drivers/staging/greybus/connection.h +++ /dev/null @@ -1,128 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus connections - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. 
- */ - -#ifndef __CONNECTION_H -#define __CONNECTION_H - -#include -#include - -#define GB_CONNECTION_FLAG_CSD BIT(0) -#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) -#define GB_CONNECTION_FLAG_OFFLOADED BIT(2) -#define GB_CONNECTION_FLAG_CDSI1 BIT(3) -#define GB_CONNECTION_FLAG_CONTROL BIT(4) -#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5) - -#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL - -enum gb_connection_state { - GB_CONNECTION_STATE_DISABLED = 0, - GB_CONNECTION_STATE_ENABLED_TX = 1, - GB_CONNECTION_STATE_ENABLED = 2, - GB_CONNECTION_STATE_DISCONNECTING = 3, -}; - -struct gb_operation; - -typedef int (*gb_request_handler_t)(struct gb_operation *); - -struct gb_connection { - struct gb_host_device *hd; - struct gb_interface *intf; - struct gb_bundle *bundle; - struct kref kref; - u16 hd_cport_id; - u16 intf_cport_id; - - struct list_head hd_links; - struct list_head bundle_links; - - gb_request_handler_t handler; - unsigned long flags; - - struct mutex mutex; - spinlock_t lock; - enum gb_connection_state state; - struct list_head operations; - - char name[16]; - struct workqueue_struct *wq; - - atomic_t op_cycle; - - void *private; - - bool mode_switch; -}; - -struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, - u16 hd_cport_id, gb_request_handler_t handler); -struct gb_connection *gb_connection_create_control(struct gb_interface *intf); -struct gb_connection *gb_connection_create(struct gb_bundle *bundle, - u16 cport_id, gb_request_handler_t handler); -struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle, - u16 cport_id, gb_request_handler_t handler, - unsigned long flags); -struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle, - u16 cport_id, unsigned long flags); -void gb_connection_destroy(struct gb_connection *connection); - -static inline bool gb_connection_is_static(struct gb_connection *connection) -{ - return !connection->intf; -} - -int gb_connection_enable(struct gb_connection *connection); -int gb_connection_enable_tx(struct gb_connection *connection); -void gb_connection_disable_rx(struct gb_connection *connection); -void gb_connection_disable(struct gb_connection *connection); -void gb_connection_disable_forced(struct gb_connection *connection); - -void gb_connection_mode_switch_prepare(struct gb_connection *connection); -void gb_connection_mode_switch_complete(struct gb_connection *connection); - -void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, - u8 *data, size_t length); - -void gb_connection_latency_tag_enable(struct gb_connection *connection); -void gb_connection_latency_tag_disable(struct gb_connection *connection); - -static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection) -{ - return !(connection->flags & GB_CONNECTION_FLAG_CSD); -} - -static inline bool -gb_connection_flow_control_disabled(struct gb_connection *connection) -{ - return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL; -} - -static inline bool gb_connection_is_offloaded(struct gb_connection *connection) -{ - return connection->flags & GB_CONNECTION_FLAG_OFFLOADED; -} - -static inline bool gb_connection_is_control(struct gb_connection *connection) -{ - return connection->flags & GB_CONNECTION_FLAG_CONTROL; -} - -static inline void *gb_connection_get_data(struct gb_connection *connection) -{ - return connection->private; -} - -static inline void gb_connection_set_data(struct gb_connection *connection, - void *data) -{ - connection->private = data; -} - -#endif /* 
__CONNECTION_H */ diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c index a9e8b6036cac..359a25841973 100644 --- a/drivers/staging/greybus/control.c +++ b/drivers/staging/greybus/control.c @@ -9,7 +9,7 @@ #include #include #include -#include "greybus.h" +#include /* Highest control-protocol version supported */ #define GB_CONTROL_VERSION_MAJOR 0 diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h deleted file mode 100644 index 3a29ec05f631..000000000000 --- a/drivers/staging/greybus/control.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus CPort control protocol - * - * Copyright 2015 Google Inc. - * Copyright 2015 Linaro Ltd. - */ - -#ifndef __CONTROL_H -#define __CONTROL_H - -struct gb_control { - struct device dev; - struct gb_interface *intf; - - struct gb_connection *connection; - - u8 protocol_major; - u8 protocol_minor; - - bool has_bundle_activate; - bool has_bundle_version; - - char *vendor_string; - char *product_string; -}; -#define to_gb_control(d) container_of(d, struct gb_control, dev) - -struct gb_control *gb_control_create(struct gb_interface *intf); -int gb_control_enable(struct gb_control *control); -void gb_control_disable(struct gb_control *control); -int gb_control_suspend(struct gb_control *control); -int gb_control_resume(struct gb_control *control); -int gb_control_add(struct gb_control *control); -void gb_control_del(struct gb_control *control); -struct gb_control *gb_control_get(struct gb_control *control); -void gb_control_put(struct gb_control *control); - -int gb_control_get_bundle_versions(struct gb_control *control); -int gb_control_connected_operation(struct gb_control *control, u16 cport_id); -int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); -int gb_control_disconnecting_operation(struct gb_control *control, - u16 cport_id); -int gb_control_mode_switch_operation(struct gb_control *control); -void gb_control_mode_switch_prepare(struct gb_control *control); -void gb_control_mode_switch_complete(struct gb_control *control); -int gb_control_get_manifest_size_operation(struct gb_interface *intf); -int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, - size_t size); -int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id); -int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id); -int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id); -int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id); -int gb_control_interface_suspend_prepare(struct gb_control *control); -int gb_control_interface_deactivate_prepare(struct gb_control *control); -int gb_control_interface_hibernate_abort(struct gb_control *control); -#endif /* __CONTROL_H */ diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c index d6b0d49130c0..e546c6431877 100644 --- a/drivers/staging/greybus/core.c +++ b/drivers/staging/greybus/core.c @@ -9,7 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define CREATE_TRACE_POINTS -#include "greybus.h" +#include #include "greybus_trace.h" #define GB_BUNDLE_AUTOSUSPEND_MS 3000 diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c index 56e20c30feb5..e102d7badb9d 100644 --- a/drivers/staging/greybus/debugfs.c +++ b/drivers/staging/greybus/debugfs.c @@ -7,8 +7,7 @@ */ #include - -#include "greybus.h" +#include static struct dentry *gb_debug_root; diff --git a/drivers/staging/greybus/es2.c 
b/drivers/staging/greybus/es2.c index be6af18cec31..366716f11b1a 100644 --- a/drivers/staging/greybus/es2.c +++ b/drivers/staging/greybus/es2.c @@ -11,12 +11,11 @@ #include #include #include +#include #include #include "arpc.h" -#include "greybus.h" #include "greybus_trace.h" -#include "connection.h" /* Default timeout for USB vendor requests. */ diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h index 72dfabfa4704..5d2564462ffc 100644 --- a/drivers/staging/greybus/firmware.h +++ b/drivers/staging/greybus/firmware.h @@ -9,7 +9,7 @@ #ifndef __FIRMWARE_H #define __FIRMWARE_H -#include "greybus.h" +#include #define FW_NAME_PREFIX "gmp_" diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c index 388866d92f5b..57bebf24636b 100644 --- a/drivers/staging/greybus/fw-core.c +++ b/drivers/staging/greybus/fw-core.c @@ -8,8 +8,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include "firmware.h" -#include "greybus.h" #include "spilib.h" struct gb_fw_core { diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c index d3b7cccbc10d..543692c567f9 100644 --- a/drivers/staging/greybus/fw-download.c +++ b/drivers/staging/greybus/fw-download.c @@ -10,8 +10,8 @@ #include #include #include +#include #include "firmware.h" -#include "greybus.h" /* Estimated minimum buffer size, actual size can be smaller than this */ #define MIN_FETCH_SIZE 512 diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c index 71aec14f8181..687c6405c65b 100644 --- a/drivers/staging/greybus/fw-management.c +++ b/drivers/staging/greybus/fw-management.c @@ -13,10 +13,10 @@ #include #include #include +#include #include "firmware.h" #include "greybus_firmware.h" -#include "greybus.h" #define FW_MGMT_TIMEOUT_MS 1000 diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c index 6cb85c3d3572..9fc5c47be9bd 100644 --- a/drivers/staging/greybus/gbphy.c +++ b/drivers/staging/greybus/gbphy.c @@ -13,8 +13,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" #define GB_GBPHY_AUTOSUSPEND_MS 3000 diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 3151004d26fb..1ff34abd5692 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -13,8 +13,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" struct gb_gpio_line { diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h deleted file mode 100644 index f0488ffff93e..000000000000 --- a/drivers/staging/greybus/greybus.h +++ /dev/null @@ -1,152 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus driver and device API - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. 
- */ - -#ifndef __LINUX_GREYBUS_H -#define __LINUX_GREYBUS_H - -#ifdef __KERNEL__ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "greybus_id.h" -#include "greybus_manifest.h" -#include "greybus_protocols.h" -#include "manifest.h" -#include "hd.h" -#include "svc.h" -#include "control.h" -#include "module.h" -#include "interface.h" -#include "bundle.h" -#include "connection.h" -#include "operation.h" - -/* Matches up with the Greybus Protocol specification document */ -#define GREYBUS_VERSION_MAJOR 0x00 -#define GREYBUS_VERSION_MINOR 0x01 - -#define GREYBUS_ID_MATCH_DEVICE \ - (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT) - -#define GREYBUS_DEVICE(v, p) \ - .match_flags = GREYBUS_ID_MATCH_DEVICE, \ - .vendor = (v), \ - .product = (p), - -#define GREYBUS_DEVICE_CLASS(c) \ - .match_flags = GREYBUS_ID_MATCH_CLASS, \ - .class = (c), - -/* Maximum number of CPorts */ -#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */ -#define CPORT_ID_BAD U16_MAX - -struct greybus_driver { - const char *name; - - int (*probe)(struct gb_bundle *bundle, - const struct greybus_bundle_id *id); - void (*disconnect)(struct gb_bundle *bundle); - - const struct greybus_bundle_id *id_table; - - struct device_driver driver; -}; -#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver) - -static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data) -{ - dev_set_drvdata(&bundle->dev, data); -} - -static inline void *greybus_get_drvdata(struct gb_bundle *bundle) -{ - return dev_get_drvdata(&bundle->dev); -} - -/* Don't call these directly, use the module_greybus_driver() macro instead */ -int greybus_register_driver(struct greybus_driver *driver, - struct module *module, const char *mod_name); -void greybus_deregister_driver(struct greybus_driver *driver); - -/* define to get proper THIS_MODULE and KBUILD_MODNAME values */ -#define greybus_register(driver) \ - greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) -#define greybus_deregister(driver) \ - greybus_deregister_driver(driver) - -/** - * module_greybus_driver() - Helper macro for registering a Greybus driver - * @__greybus_driver: greybus_driver structure - * - * Helper macro for Greybus drivers to set up proper module init / exit - * functions. Replaces module_init() and module_exit() and keeps people from - * printing pointless things to the kernel log when their driver is loaded. 
- */ -#define module_greybus_driver(__greybus_driver) \ - module_driver(__greybus_driver, greybus_register, greybus_deregister) - -int greybus_disabled(void); - -void gb_debugfs_init(void); -void gb_debugfs_cleanup(void); -struct dentry *gb_debugfs_get(void); - -extern struct bus_type greybus_bus_type; - -extern struct device_type greybus_hd_type; -extern struct device_type greybus_module_type; -extern struct device_type greybus_interface_type; -extern struct device_type greybus_control_type; -extern struct device_type greybus_bundle_type; -extern struct device_type greybus_svc_type; - -static inline int is_gb_host_device(const struct device *dev) -{ - return dev->type == &greybus_hd_type; -} - -static inline int is_gb_module(const struct device *dev) -{ - return dev->type == &greybus_module_type; -} - -static inline int is_gb_interface(const struct device *dev) -{ - return dev->type == &greybus_interface_type; -} - -static inline int is_gb_control(const struct device *dev) -{ - return dev->type == &greybus_control_type; -} - -static inline int is_gb_bundle(const struct device *dev) -{ - return dev->type == &greybus_bundle_type; -} - -static inline int is_gb_svc(const struct device *dev) -{ - return dev->type == &greybus_svc_type; -} - -static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id) -{ - return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports; -} - -#endif /* __KERNEL__ */ -#endif /* __LINUX_GREYBUS_H */ diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h deleted file mode 100644 index f4c8440093e4..000000000000 --- a/drivers/staging/greybus/greybus_id.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* FIXME - * move this to include/linux/mod_devicetable.h when merging - */ - -#ifndef __LINUX_GREYBUS_ID_H -#define __LINUX_GREYBUS_ID_H - -#include -#include - - -struct greybus_bundle_id { - __u16 match_flags; - __u32 vendor; - __u32 product; - __u8 class; - - kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t)); -}; - -/* Used to match the greybus_bundle_id */ -#define GREYBUS_ID_MATCH_VENDOR BIT(0) -#define GREYBUS_ID_MATCH_PRODUCT BIT(1) -#define GREYBUS_ID_MATCH_CLASS BIT(2) - -#endif /* __LINUX_GREYBUS_ID_H */ diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h deleted file mode 100644 index db68f7e7d5a7..000000000000 --- a/drivers/staging/greybus/greybus_manifest.h +++ /dev/null @@ -1,178 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus manifest definition - * - * See "Greybus Application Protocol" document (version 0.1) for - * details on these values and structures. - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. - * - * Released under the GPLv2 and BSD licenses. 
- */ - -#ifndef __GREYBUS_MANIFEST_H -#define __GREYBUS_MANIFEST_H - -enum greybus_descriptor_type { - GREYBUS_TYPE_INVALID = 0x00, - GREYBUS_TYPE_INTERFACE = 0x01, - GREYBUS_TYPE_STRING = 0x02, - GREYBUS_TYPE_BUNDLE = 0x03, - GREYBUS_TYPE_CPORT = 0x04, -}; - -enum greybus_protocol { - GREYBUS_PROTOCOL_CONTROL = 0x00, - /* 0x01 is unused */ - GREYBUS_PROTOCOL_GPIO = 0x02, - GREYBUS_PROTOCOL_I2C = 0x03, - GREYBUS_PROTOCOL_UART = 0x04, - GREYBUS_PROTOCOL_HID = 0x05, - GREYBUS_PROTOCOL_USB = 0x06, - GREYBUS_PROTOCOL_SDIO = 0x07, - GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08, - GREYBUS_PROTOCOL_PWM = 0x09, - /* 0x0a is unused */ - GREYBUS_PROTOCOL_SPI = 0x0b, - GREYBUS_PROTOCOL_DISPLAY = 0x0c, - GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d, - GREYBUS_PROTOCOL_SENSOR = 0x0e, - GREYBUS_PROTOCOL_LIGHTS = 0x0f, - GREYBUS_PROTOCOL_VIBRATOR = 0x10, - GREYBUS_PROTOCOL_LOOPBACK = 0x11, - GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12, - GREYBUS_PROTOCOL_AUDIO_DATA = 0x13, - GREYBUS_PROTOCOL_SVC = 0x14, - GREYBUS_PROTOCOL_BOOTROM = 0x15, - GREYBUS_PROTOCOL_CAMERA_DATA = 0x16, - GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17, - GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18, - GREYBUS_PROTOCOL_AUTHENTICATION = 0x19, - GREYBUS_PROTOCOL_LOG = 0x1a, - /* ... */ - GREYBUS_PROTOCOL_RAW = 0xfe, - GREYBUS_PROTOCOL_VENDOR = 0xff, -}; - -enum greybus_class_type { - GREYBUS_CLASS_CONTROL = 0x00, - /* 0x01 is unused */ - /* 0x02 is unused */ - /* 0x03 is unused */ - /* 0x04 is unused */ - GREYBUS_CLASS_HID = 0x05, - /* 0x06 is unused */ - /* 0x07 is unused */ - GREYBUS_CLASS_POWER_SUPPLY = 0x08, - /* 0x09 is unused */ - GREYBUS_CLASS_BRIDGED_PHY = 0x0a, - /* 0x0b is unused */ - GREYBUS_CLASS_DISPLAY = 0x0c, - GREYBUS_CLASS_CAMERA = 0x0d, - GREYBUS_CLASS_SENSOR = 0x0e, - GREYBUS_CLASS_LIGHTS = 0x0f, - GREYBUS_CLASS_VIBRATOR = 0x10, - GREYBUS_CLASS_LOOPBACK = 0x11, - GREYBUS_CLASS_AUDIO = 0x12, - /* 0x13 is unused */ - /* 0x14 is unused */ - GREYBUS_CLASS_BOOTROM = 0x15, - GREYBUS_CLASS_FW_MANAGEMENT = 0x16, - GREYBUS_CLASS_LOG = 0x17, - /* ... */ - GREYBUS_CLASS_RAW = 0xfe, - GREYBUS_CLASS_VENDOR = 0xff, -}; - -enum { - GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0), -}; - -/* - * The string in a string descriptor is not NUL-terminated. The - * size of the descriptor will be rounded up to a multiple of 4 - * bytes, by padding the string with 0x00 bytes if necessary. - */ -struct greybus_descriptor_string { - __u8 length; - __u8 id; - __u8 string[0]; -} __packed; - -/* - * An interface descriptor describes information about an interface as a whole, - * *not* the functions within it. - */ -struct greybus_descriptor_interface { - __u8 vendor_stringid; - __u8 product_stringid; - __u8 features; - __u8 pad; -} __packed; - -/* - * An bundle descriptor defines an identification number and a class for - * each bundle. - * - * @id: Uniquely identifies a bundle within a interface, its sole purpose is to - * allow CPort descriptors to specify which bundle they are associated with. - * The first bundle will have id 0, second will have 1 and so on. - * - * The largest CPort id associated with an bundle (defined by a - * CPort descriptor in the manifest) is used to determine how to - * encode the device id and module number in UniPro packets - * that use the bundle. - * - * @class: It is used by kernel to know the functionality provided by the - * bundle and will be matched against drivers functinality while probing greybus - * driver. It should contain one of the values defined in - * 'enum greybus_class_type'. 
- * - */ -struct greybus_descriptor_bundle { - __u8 id; /* interface-relative id (0..) */ - __u8 class; - __u8 pad[2]; -} __packed; - -/* - * A CPort descriptor indicates the id of the bundle within the - * module it's associated with, along with the CPort id used to - * address the CPort. The protocol id defines the format of messages - * exchanged using the CPort. - */ -struct greybus_descriptor_cport { - __le16 id; - __u8 bundle; - __u8 protocol_id; /* enum greybus_protocol */ -} __packed; - -struct greybus_descriptor_header { - __le16 size; - __u8 type; /* enum greybus_descriptor_type */ - __u8 pad; -} __packed; - -struct greybus_descriptor { - struct greybus_descriptor_header header; - union { - struct greybus_descriptor_string string; - struct greybus_descriptor_interface interface; - struct greybus_descriptor_bundle bundle; - struct greybus_descriptor_cport cport; - }; -} __packed; - -struct greybus_manifest_header { - __le16 size; - __u8 version_major; - __u8 version_minor; -} __packed; - -struct greybus_manifest { - struct greybus_manifest_header header; - struct greybus_descriptor descriptors[0]; -} __packed; - -#endif /* __GREYBUS_MANIFEST_H */ diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h deleted file mode 100644 index 5f34d1effb59..000000000000 --- a/drivers/staging/greybus/greybus_protocols.h +++ /dev/null @@ -1,2176 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ -/* - * Copyright(c) 2014 - 2015 Google Inc. All rights reserved. - * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved. - */ - -#ifndef __GREYBUS_PROTOCOLS_H -#define __GREYBUS_PROTOCOLS_H - -/* Fixed IDs for control/svc protocols */ - -/* SVC switch-port device ids */ -#define GB_SVC_DEVICE_ID_SVC 0 -#define GB_SVC_DEVICE_ID_AP 1 -#define GB_SVC_DEVICE_ID_MIN 2 -#define GB_SVC_DEVICE_ID_MAX 31 - -#define GB_SVC_CPORT_ID 0 -#define GB_CONTROL_BUNDLE_ID 0 -#define GB_CONTROL_CPORT_ID 0 - - -/* - * All operation messages (both requests and responses) begin with - * a header that encodes the size of the message (header included). - * This header also contains a unique identifier, that associates a - * response message with its operation. The header contains an - * operation type field, whose interpretation is dependent on what - * type of protocol is used over the connection. The high bit - * (0x80) of the operation type field is used to indicate whether - * the message is a request (clear) or a response (set). - * - * Response messages include an additional result byte, which - * communicates the result of the corresponding request. A zero - * result value means the operation completed successfully. Any - * other value indicates an error; in this case, the payload of the - * response message (if any) is ignored. The result byte must be - * zero in the header for a request message. - * - * The wire format for all numeric fields in the header is little - * endian. Any operation-specific data begins immediately after the - * header. 
- */ -struct gb_operation_msg_hdr { - __le16 size; /* Size in bytes of header + payload */ - __le16 operation_id; /* Operation unique id */ - __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */ - __u8 result; /* Result of request (in responses only) */ - __u8 pad[2]; /* must be zero (ignore when read) */ -} __packed; - - -/* Generic request types */ -#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00 -#define GB_REQUEST_TYPE_INVALID 0x7f - -struct gb_cport_shutdown_request { - __u8 phase; -} __packed; - - -/* Control Protocol */ - -/* Greybus control request types */ -#define GB_CONTROL_TYPE_VERSION 0x01 -#define GB_CONTROL_TYPE_PROBE_AP 0x02 -#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03 -#define GB_CONTROL_TYPE_GET_MANIFEST 0x04 -#define GB_CONTROL_TYPE_CONNECTED 0x05 -#define GB_CONTROL_TYPE_DISCONNECTED 0x06 -#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07 -#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08 -#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09 -/* Unused 0x0a */ -#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b -#define GB_CONTROL_TYPE_DISCONNECTING 0x0c -#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d -#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e -#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f -#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10 -#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11 -#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12 -#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13 -#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14 -#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15 - -struct gb_control_version_request { - __u8 major; - __u8 minor; -} __packed; - -struct gb_control_version_response { - __u8 major; - __u8 minor; -} __packed; - -struct gb_control_bundle_version_request { - __u8 bundle_id; -} __packed; - -struct gb_control_bundle_version_response { - __u8 major; - __u8 minor; -} __packed; - -/* Control protocol manifest get size request has no payload*/ -struct gb_control_get_manifest_size_response { - __le16 size; -} __packed; - -/* Control protocol manifest get request has no payload */ -struct gb_control_get_manifest_response { - __u8 data[0]; -} __packed; - -/* Control protocol [dis]connected request */ -struct gb_control_connected_request { - __le16 cport_id; -} __packed; - -struct gb_control_disconnecting_request { - __le16 cport_id; -} __packed; -/* disconnecting response has no payload */ - -struct gb_control_disconnected_request { - __le16 cport_id; -} __packed; -/* Control protocol [dis]connected response has no payload */ - -/* - * All Bundle power management operations use the same request and response - * layout and status codes. - */ - -#define GB_CONTROL_BUNDLE_PM_OK 0x00 -#define GB_CONTROL_BUNDLE_PM_INVAL 0x01 -#define GB_CONTROL_BUNDLE_PM_BUSY 0x02 -#define GB_CONTROL_BUNDLE_PM_FAIL 0x03 -#define GB_CONTROL_BUNDLE_PM_NA 0x04 - -struct gb_control_bundle_pm_request { - __u8 bundle_id; -} __packed; - -struct gb_control_bundle_pm_response { - __u8 status; -} __packed; - -/* - * Interface Suspend Prepare and Deactivate Prepare operations use the same - * response layout and error codes. Define a single response structure and reuse - * it. Both operations have no payload. 
- */ - -#define GB_CONTROL_INTF_PM_OK 0x00 -#define GB_CONTROL_INTF_PM_BUSY 0x01 -#define GB_CONTROL_INTF_PM_NA 0x02 - -struct gb_control_intf_pm_response { - __u8 status; -} __packed; - -/* APBridge protocol */ - -/* request APB1 log */ -#define GB_APB_REQUEST_LOG 0x02 - -/* request to map a cport to bulk in and bulk out endpoints */ -#define GB_APB_REQUEST_EP_MAPPING 0x03 - -/* request to get the number of cports available */ -#define GB_APB_REQUEST_CPORT_COUNT 0x04 - -/* request to reset a cport state */ -#define GB_APB_REQUEST_RESET_CPORT 0x05 - -/* request to time the latency of messages on a given cport */ -#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06 -#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07 - -/* request to control the CSI transmitter */ -#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08 - -/* request to control audio streaming */ -#define GB_APB_REQUEST_AUDIO_CONTROL 0x09 - -/* TimeSync requests */ -#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d -#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e -#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f -#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10 - -/* requests to set Greybus CPort flags */ -#define GB_APB_REQUEST_CPORT_FLAGS 0x11 - -/* ARPC request */ -#define GB_APB_REQUEST_ARPC_RUN 0x12 - -struct gb_apb_request_cport_flags { - __le32 flags; -#define GB_APB_CPORT_FLAG_CONTROL 0x01 -#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02 -} __packed; - - -/* Firmware Download Protocol */ - -/* Request Types */ -#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01 -#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02 -#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03 - -#define GB_FIRMWARE_TAG_MAX_SIZE 10 - -/* firmware download find firmware request/response */ -struct gb_fw_download_find_firmware_request { - __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; -} __packed; - -struct gb_fw_download_find_firmware_response { - __u8 firmware_id; - __le32 size; -} __packed; - -/* firmware download fetch firmware request/response */ -struct gb_fw_download_fetch_firmware_request { - __u8 firmware_id; - __le32 offset; - __le32 size; -} __packed; - -struct gb_fw_download_fetch_firmware_response { - __u8 data[0]; -} __packed; - -/* firmware download release firmware request */ -struct gb_fw_download_release_firmware_request { - __u8 firmware_id; -} __packed; -/* firmware download release firmware response has no payload */ - - -/* Firmware Management Protocol */ - -/* Request Types */ -#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01 -#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02 -#define GB_FW_MGMT_TYPE_LOADED_FW 0x03 -#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04 -#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05 -#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06 - -#define GB_FW_LOAD_METHOD_UNIPRO 0x01 -#define GB_FW_LOAD_METHOD_INTERNAL 0x02 - -#define GB_FW_LOAD_STATUS_FAILED 0x00 -#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01 -#define GB_FW_LOAD_STATUS_VALIDATED 0x02 -#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03 - -#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01 -#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02 -#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03 -#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04 -#define GB_FW_BACKEND_FW_STATUS_INT 0x05 -#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06 -#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07 - -#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01 -#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02 -#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03 -#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04 -#define 
GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05 - -/* firmware management interface firmware version request has no payload */ -struct gb_fw_mgmt_interface_fw_version_response { - __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; - __le16 major; - __le16 minor; -} __packed; - -/* firmware management load and validate firmware request/response */ -struct gb_fw_mgmt_load_and_validate_fw_request { - __u8 request_id; - __u8 load_method; - __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; -} __packed; -/* firmware management load and validate firmware response has no payload*/ - -/* firmware management loaded firmware request */ -struct gb_fw_mgmt_loaded_fw_request { - __u8 request_id; - __u8 status; - __le16 major; - __le16 minor; -} __packed; -/* firmware management loaded firmware response has no payload */ - -/* firmware management backend firmware version request/response */ -struct gb_fw_mgmt_backend_fw_version_request { - __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; -} __packed; - -struct gb_fw_mgmt_backend_fw_version_response { - __le16 major; - __le16 minor; - __u8 status; -} __packed; - -/* firmware management backend firmware update request */ -struct gb_fw_mgmt_backend_fw_update_request { - __u8 request_id; - __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; -} __packed; -/* firmware management backend firmware update response has no payload */ - -/* firmware management backend firmware updated request */ -struct gb_fw_mgmt_backend_fw_updated_request { - __u8 request_id; - __u8 status; -} __packed; -/* firmware management backend firmware updated response has no payload */ - - -/* Component Authentication Protocol (CAP) */ - -/* Request Types */ -#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01 -#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02 -#define GB_CAP_TYPE_AUTHENTICATE 0x03 - -/* CAP get endpoint uid request has no payload */ -struct gb_cap_get_endpoint_uid_response { - __u8 uid[8]; -} __packed; - -/* CAP get endpoint ims certificate request/response */ -struct gb_cap_get_ims_certificate_request { - __le32 certificate_class; - __le32 certificate_id; -} __packed; - -struct gb_cap_get_ims_certificate_response { - __u8 result_code; - __u8 certificate[0]; -} __packed; - -/* CAP authenticate request/response */ -struct gb_cap_authenticate_request { - __le32 auth_type; - __u8 uid[8]; - __u8 challenge[32]; -} __packed; - -struct gb_cap_authenticate_response { - __u8 result_code; - __u8 response[64]; - __u8 signature[0]; -} __packed; - - -/* Bootrom Protocol */ - -/* Version of the Greybus bootrom protocol we support */ -#define GB_BOOTROM_VERSION_MAJOR 0x00 -#define GB_BOOTROM_VERSION_MINOR 0x01 - -/* Greybus bootrom request types */ -#define GB_BOOTROM_TYPE_VERSION 0x01 -#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02 -#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03 -#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04 -#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */ -#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */ - -/* Greybus bootrom boot stages */ -#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */ -#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */ -#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */ - -/* Greybus bootrom ready to boot status */ -#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */ -#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */ -#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob 
is valid and secure */ - -/* Max bootrom data fetch size in bytes */ -#define GB_BOOTROM_FETCH_MAX 2000 - -struct gb_bootrom_version_request { - __u8 major; - __u8 minor; -} __packed; - -struct gb_bootrom_version_response { - __u8 major; - __u8 minor; -} __packed; - -/* Bootrom protocol firmware size request/response */ -struct gb_bootrom_firmware_size_request { - __u8 stage; -} __packed; - -struct gb_bootrom_firmware_size_response { - __le32 size; -} __packed; - -/* Bootrom protocol get firmware request/response */ -struct gb_bootrom_get_firmware_request { - __le32 offset; - __le32 size; -} __packed; - -struct gb_bootrom_get_firmware_response { - __u8 data[0]; -} __packed; - -/* Bootrom protocol Ready to boot request */ -struct gb_bootrom_ready_to_boot_request { - __u8 status; -} __packed; -/* Bootrom protocol Ready to boot response has no payload */ - -/* Bootrom protocol get VID/PID request has no payload */ -struct gb_bootrom_get_vid_pid_response { - __le32 vendor_id; - __le32 product_id; -} __packed; - - -/* Power Supply */ - -/* Greybus power supply request types */ -#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02 -#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03 -#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04 -#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05 -#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06 -#define GB_POWER_SUPPLY_TYPE_EVENT 0x07 - -/* Greybus power supply battery technologies types */ -#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000 -#define GB_POWER_SUPPLY_TECH_NiMH 0x0001 -#define GB_POWER_SUPPLY_TECH_LION 0x0002 -#define GB_POWER_SUPPLY_TECH_LIPO 0x0003 -#define GB_POWER_SUPPLY_TECH_LiFe 0x0004 -#define GB_POWER_SUPPLY_TECH_NiCd 0x0005 -#define GB_POWER_SUPPLY_TECH_LiMn 0x0006 - -/* Greybus power supply types */ -#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000 -#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001 -#define GB_POWER_SUPPLY_UPS_TYPE 0x0002 -#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003 -#define GB_POWER_SUPPLY_USB_TYPE 0x0004 -#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005 -#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006 -#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007 - -/* Greybus power supply health values */ -#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000 -#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001 -#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002 -#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003 -#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004 -#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005 -#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006 -#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007 -#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008 - -/* Greybus power supply status values */ -#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000 -#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001 -#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002 -#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003 -#define GB_POWER_SUPPLY_STATUS_FULL 0x0004 - -/* Greybus power supply capacity level values */ -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000 -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001 -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002 -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003 -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004 -#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005 - -/* Greybus power supply scope values */ -#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000 -#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001 -#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002 - -struct gb_power_supply_get_supplies_response { - __u8 supplies_count; -} __packed; - -struct 
gb_power_supply_get_description_request { - __u8 psy_id; -} __packed; - -struct gb_power_supply_get_description_response { - __u8 manufacturer[32]; - __u8 model[32]; - __u8 serial_number[32]; - __le16 type; - __u8 properties_count; -} __packed; - -struct gb_power_supply_props_desc { - __u8 property; -#define GB_POWER_SUPPLY_PROP_STATUS 0x00 -#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01 -#define GB_POWER_SUPPLY_PROP_HEALTH 0x02 -#define GB_POWER_SUPPLY_PROP_PRESENT 0x03 -#define GB_POWER_SUPPLY_PROP_ONLINE 0x04 -#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05 -#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06 -#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07 -#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08 -#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09 -#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A -#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B -#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C -#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D -#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E -#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F -#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10 -#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11 -#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12 -#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13 -#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14 -#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15 -#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16 -#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17 -#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18 -#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19 -#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A -#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B -#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C -#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D -#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E -#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F -#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20 -#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21 -#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22 -#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23 -#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24 -#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25 -#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26 -#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27 -#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28 -#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29 -#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A -#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B -#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C -#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D -#define GB_POWER_SUPPLY_PROP_TEMP 0x2E -#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F -#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30 -#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31 -#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32 -#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33 -#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34 -#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35 -#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36 -#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37 -#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38 -#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39 -#define GB_POWER_SUPPLY_PROP_TYPE 0x3A -#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B -#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C -#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D - __u8 is_writeable; -} __packed; - -struct gb_power_supply_get_property_descriptors_request { - __u8 psy_id; -} __packed; - -struct gb_power_supply_get_property_descriptors_response { - 
__u8 properties_count; - struct gb_power_supply_props_desc props[]; -} __packed; - -struct gb_power_supply_get_property_request { - __u8 psy_id; - __u8 property; -} __packed; - -struct gb_power_supply_get_property_response { - __le32 prop_val; -}; - -struct gb_power_supply_set_property_request { - __u8 psy_id; - __u8 property; - __le32 prop_val; -} __packed; - -struct gb_power_supply_event_request { - __u8 psy_id; - __u8 event; -#define GB_POWER_SUPPLY_UPDATE 0x01 -} __packed; - - -/* HID */ - -/* Greybus HID operation types */ -#define GB_HID_TYPE_GET_DESC 0x02 -#define GB_HID_TYPE_GET_REPORT_DESC 0x03 -#define GB_HID_TYPE_PWR_ON 0x04 -#define GB_HID_TYPE_PWR_OFF 0x05 -#define GB_HID_TYPE_GET_REPORT 0x06 -#define GB_HID_TYPE_SET_REPORT 0x07 -#define GB_HID_TYPE_IRQ_EVENT 0x08 - -/* Report type */ -#define GB_HID_INPUT_REPORT 0 -#define GB_HID_OUTPUT_REPORT 1 -#define GB_HID_FEATURE_REPORT 2 - -/* Different request/response structures */ -/* HID get descriptor response */ -struct gb_hid_desc_response { - __u8 bLength; - __le16 wReportDescLength; - __le16 bcdHID; - __le16 wProductID; - __le16 wVendorID; - __u8 bCountryCode; -} __packed; - -/* HID get report request/response */ -struct gb_hid_get_report_request { - __u8 report_type; - __u8 report_id; -} __packed; - -/* HID set report request */ -struct gb_hid_set_report_request { - __u8 report_type; - __u8 report_id; - __u8 report[0]; -} __packed; - -/* HID input report request, via interrupt pipe */ -struct gb_hid_input_report_request { - __u8 report[0]; -} __packed; - - -/* I2C */ - -/* Greybus i2c request types */ -#define GB_I2C_TYPE_FUNCTIONALITY 0x02 -#define GB_I2C_TYPE_TRANSFER 0x05 - -/* functionality request has no payload */ -struct gb_i2c_functionality_response { - __le32 functionality; -} __packed; - -/* - * Outgoing data immediately follows the op count and ops array. - * The data for each write (master -> slave) op in the array is sent - * in order, with no (e.g. pad) bytes separating them. - * - * Short reads cause the entire transfer request to fail So response - * payload consists only of bytes read, and the number of bytes is - * exactly what was specified in the corresponding op. Like - * outgoing data, the incoming data is in order and contiguous. 
- */ -struct gb_i2c_transfer_op { - __le16 addr; - __le16 flags; - __le16 size; -} __packed; - -struct gb_i2c_transfer_request { - __le16 op_count; - struct gb_i2c_transfer_op ops[0]; /* op_count of these */ -} __packed; -struct gb_i2c_transfer_response { - __u8 data[0]; /* inbound data */ -} __packed; - - -/* GPIO */ - -/* Greybus GPIO request types */ -#define GB_GPIO_TYPE_LINE_COUNT 0x02 -#define GB_GPIO_TYPE_ACTIVATE 0x03 -#define GB_GPIO_TYPE_DEACTIVATE 0x04 -#define GB_GPIO_TYPE_GET_DIRECTION 0x05 -#define GB_GPIO_TYPE_DIRECTION_IN 0x06 -#define GB_GPIO_TYPE_DIRECTION_OUT 0x07 -#define GB_GPIO_TYPE_GET_VALUE 0x08 -#define GB_GPIO_TYPE_SET_VALUE 0x09 -#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a -#define GB_GPIO_TYPE_IRQ_TYPE 0x0b -#define GB_GPIO_TYPE_IRQ_MASK 0x0c -#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d -#define GB_GPIO_TYPE_IRQ_EVENT 0x0e - -#define GB_GPIO_IRQ_TYPE_NONE 0x00 -#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01 -#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02 -#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03 -#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04 -#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08 - -/* line count request has no payload */ -struct gb_gpio_line_count_response { - __u8 count; -} __packed; - -struct gb_gpio_activate_request { - __u8 which; -} __packed; -/* activate response has no payload */ - -struct gb_gpio_deactivate_request { - __u8 which; -} __packed; -/* deactivate response has no payload */ - -struct gb_gpio_get_direction_request { - __u8 which; -} __packed; -struct gb_gpio_get_direction_response { - __u8 direction; -} __packed; - -struct gb_gpio_direction_in_request { - __u8 which; -} __packed; -/* direction in response has no payload */ - -struct gb_gpio_direction_out_request { - __u8 which; - __u8 value; -} __packed; -/* direction out response has no payload */ - -struct gb_gpio_get_value_request { - __u8 which; -} __packed; -struct gb_gpio_get_value_response { - __u8 value; -} __packed; - -struct gb_gpio_set_value_request { - __u8 which; - __u8 value; -} __packed; -/* set value response has no payload */ - -struct gb_gpio_set_debounce_request { - __u8 which; - __le16 usec; -} __packed; -/* debounce response has no payload */ - -struct gb_gpio_irq_type_request { - __u8 which; - __u8 type; -} __packed; -/* irq type response has no payload */ - -struct gb_gpio_irq_mask_request { - __u8 which; -} __packed; -/* irq mask response has no payload */ - -struct gb_gpio_irq_unmask_request { - __u8 which; -} __packed; -/* irq unmask response has no payload */ - -/* irq event requests originate on another module and are handled on the AP */ -struct gb_gpio_irq_event_request { - __u8 which; -} __packed; -/* irq event has no response */ - - -/* PWM */ - -/* Greybus PWM operation types */ -#define GB_PWM_TYPE_PWM_COUNT 0x02 -#define GB_PWM_TYPE_ACTIVATE 0x03 -#define GB_PWM_TYPE_DEACTIVATE 0x04 -#define GB_PWM_TYPE_CONFIG 0x05 -#define GB_PWM_TYPE_POLARITY 0x06 -#define GB_PWM_TYPE_ENABLE 0x07 -#define GB_PWM_TYPE_DISABLE 0x08 - -/* pwm count request has no payload */ -struct gb_pwm_count_response { - __u8 count; -} __packed; - -struct gb_pwm_activate_request { - __u8 which; -} __packed; - -struct gb_pwm_deactivate_request { - __u8 which; -} __packed; - -struct gb_pwm_config_request { - __u8 which; - __le32 duty; - __le32 period; -} __packed; - -struct gb_pwm_polarity_request { - __u8 which; - __u8 polarity; -} __packed; - -struct gb_pwm_enable_request { - __u8 which; -} __packed; - -struct gb_pwm_disable_request { - __u8 which; -} __packed; - -/* SPI */ - -/* Should match up with modes in 
linux/spi/spi.h */ -#define GB_SPI_MODE_CPHA 0x01 /* clock phase */ -#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */ -#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */ -#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA) -#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0) -#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA) -#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */ -#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */ -#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */ -#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */ -#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */ -#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */ - -/* Should match up with flags in linux/spi/spi.h */ -#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */ -#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */ -#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */ - -/* Greybus spi operation types */ -#define GB_SPI_TYPE_MASTER_CONFIG 0x02 -#define GB_SPI_TYPE_DEVICE_CONFIG 0x03 -#define GB_SPI_TYPE_TRANSFER 0x04 - -/* mode request has no payload */ -struct gb_spi_master_config_response { - __le32 bits_per_word_mask; - __le32 min_speed_hz; - __le32 max_speed_hz; - __le16 mode; - __le16 flags; - __u8 num_chipselect; -} __packed; - -struct gb_spi_device_config_request { - __u8 chip_select; -} __packed; - -struct gb_spi_device_config_response { - __le16 mode; - __u8 bits_per_word; - __le32 max_speed_hz; - __u8 device_type; -#define GB_SPI_SPI_DEV 0x00 -#define GB_SPI_SPI_NOR 0x01 -#define GB_SPI_SPI_MODALIAS 0x02 - __u8 name[32]; -} __packed; - -/** - * struct gb_spi_transfer - a read/write buffer pair - * @speed_hz: Select a speed other than the device default for this transfer. If - * 0 the default (from @spi_device) is used. - * @len: size of rx and tx buffers (in bytes) - * @delay_usecs: microseconds to delay after this transfer before (optionally) - * changing the chipselect status, then starting the next transfer or - * completing this spi_message. - * @cs_change: affects chipselect after this transfer completes - * @bits_per_word: select a bits_per_word other than the device default for this - * transfer. If 0 the default (from @spi_device) is used. 
- */ -struct gb_spi_transfer { - __le32 speed_hz; - __le32 len; - __le16 delay_usecs; - __u8 cs_change; - __u8 bits_per_word; - __u8 xfer_flags; -#define GB_SPI_XFER_READ 0x01 -#define GB_SPI_XFER_WRITE 0x02 -#define GB_SPI_XFER_INPROGRESS 0x04 -} __packed; - -struct gb_spi_transfer_request { - __u8 chip_select; /* of the spi device */ - __u8 mode; /* of the spi device */ - __le16 count; - struct gb_spi_transfer transfers[0]; /* count of these */ -} __packed; - -struct gb_spi_transfer_response { - __u8 data[0]; /* inbound data */ -} __packed; - -/* Version of the Greybus SVC protocol we support */ -#define GB_SVC_VERSION_MAJOR 0x00 -#define GB_SVC_VERSION_MINOR 0x01 - -/* Greybus SVC request types */ -#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01 -#define GB_SVC_TYPE_SVC_HELLO 0x02 -#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03 -#define GB_SVC_TYPE_INTF_RESET 0x06 -#define GB_SVC_TYPE_CONN_CREATE 0x07 -#define GB_SVC_TYPE_CONN_DESTROY 0x08 -#define GB_SVC_TYPE_DME_PEER_GET 0x09 -#define GB_SVC_TYPE_DME_PEER_SET 0x0a -#define GB_SVC_TYPE_ROUTE_CREATE 0x0b -#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c -#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d -#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e -#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f -#define GB_SVC_TYPE_INTF_SET_PWRM 0x10 -#define GB_SVC_TYPE_INTF_EJECT 0x11 -#define GB_SVC_TYPE_PING 0x13 -#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14 -#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15 -#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16 -#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17 -#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18 -#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19 -#define GB_SVC_TYPE_TIMESYNC_PING 0x1a -#define GB_SVC_TYPE_MODULE_INSERTED 0x1f -#define GB_SVC_TYPE_MODULE_REMOVED 0x20 -#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21 -#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22 -#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23 -#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24 -#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25 -#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26 -#define GB_SVC_TYPE_INTF_ACTIVATE 0x27 -#define GB_SVC_TYPE_INTF_RESUME 0x28 -#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29 -#define GB_SVC_TYPE_INTF_OOPS 0x2a - -/* Greybus SVC protocol status values */ -#define GB_SVC_OP_SUCCESS 0x00 -#define GB_SVC_OP_UNKNOWN_ERROR 0x01 -#define GB_SVC_INTF_NOT_DETECTED 0x02 -#define GB_SVC_INTF_NO_UPRO_LINK 0x03 -#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04 -#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05 -#define GB_SVC_INTF_NO_V_SYS 0x06 -#define GB_SVC_INTF_V_CHG 0x07 -#define GB_SVC_INTF_WAKE_BUSY 0x08 -#define GB_SVC_INTF_NO_REFCLK 0x09 -#define GB_SVC_INTF_RELEASING 0x0a -#define GB_SVC_INTF_NO_ORDER 0x0b -#define GB_SVC_INTF_MBOX_SET 0x0c -#define GB_SVC_INTF_BAD_MBOX 0x0d -#define GB_SVC_INTF_OP_TIMEOUT 0x0e -#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f - -struct gb_svc_version_request { - __u8 major; - __u8 minor; -} __packed; - -struct gb_svc_version_response { - __u8 major; - __u8 minor; -} __packed; - -/* SVC protocol hello request */ -struct gb_svc_hello_request { - __le16 endo_id; - __u8 interface_id; -} __packed; -/* hello response has no payload */ - -struct gb_svc_intf_device_id_request { - __u8 intf_id; - __u8 device_id; -} __packed; -/* device id response has no payload */ - -struct gb_svc_intf_reset_request { - __u8 intf_id; -} __packed; -/* interface reset response has no payload */ - -struct gb_svc_intf_eject_request { - __u8 intf_id; -} __packed; -/* interface eject response has no payload */ - -struct gb_svc_conn_create_request { - __u8 intf1_id; - 
__le16 cport1_id; - __u8 intf2_id; - __le16 cport2_id; - __u8 tc; - __u8 flags; -} __packed; -/* connection create response has no payload */ - -struct gb_svc_conn_destroy_request { - __u8 intf1_id; - __le16 cport1_id; - __u8 intf2_id; - __le16 cport2_id; -} __packed; -/* connection destroy response has no payload */ - -struct gb_svc_dme_peer_get_request { - __u8 intf_id; - __le16 attr; - __le16 selector; -} __packed; - -struct gb_svc_dme_peer_get_response { - __le16 result_code; - __le32 attr_value; -} __packed; - -struct gb_svc_dme_peer_set_request { - __u8 intf_id; - __le16 attr; - __le16 selector; - __le32 value; -} __packed; - -struct gb_svc_dme_peer_set_response { - __le16 result_code; -} __packed; - -/* Greybus init-status values, currently retrieved using DME peer gets. */ -#define GB_INIT_SPI_BOOT_STARTED 0x02 -#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03 -#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04 -#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06 -#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09 -#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D - -struct gb_svc_route_create_request { - __u8 intf1_id; - __u8 dev1_id; - __u8 intf2_id; - __u8 dev2_id; -} __packed; -/* route create response has no payload */ - -struct gb_svc_route_destroy_request { - __u8 intf1_id; - __u8 intf2_id; -} __packed; -/* route destroy response has no payload */ - -/* used for svc_intf_vsys_{enable,disable} */ -struct gb_svc_intf_vsys_request { - __u8 intf_id; -} __packed; - -struct gb_svc_intf_vsys_response { - __u8 result_code; -#define GB_SVC_INTF_VSYS_OK 0x00 - /* 0x01 is reserved */ -#define GB_SVC_INTF_VSYS_FAIL 0x02 -} __packed; - -/* used for svc_intf_refclk_{enable,disable} */ -struct gb_svc_intf_refclk_request { - __u8 intf_id; -} __packed; - -struct gb_svc_intf_refclk_response { - __u8 result_code; -#define GB_SVC_INTF_REFCLK_OK 0x00 - /* 0x01 is reserved */ -#define GB_SVC_INTF_REFCLK_FAIL 0x02 -} __packed; - -/* used for svc_intf_unipro_{enable,disable} */ -struct gb_svc_intf_unipro_request { - __u8 intf_id; -} __packed; - -struct gb_svc_intf_unipro_response { - __u8 result_code; -#define GB_SVC_INTF_UNIPRO_OK 0x00 - /* 0x01 is reserved */ -#define GB_SVC_INTF_UNIPRO_FAIL 0x02 -#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03 -} __packed; - -#define GB_SVC_UNIPRO_FAST_MODE 0x01 -#define GB_SVC_UNIPRO_SLOW_MODE 0x02 -#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04 -#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05 -#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07 -#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11 -#define GB_SVC_UNIPRO_OFF_MODE 0x12 - -#define GB_SVC_SMALL_AMPLITUDE 0x01 -#define GB_SVC_LARGE_AMPLITUDE 0x02 - -#define GB_SVC_NO_DE_EMPHASIS 0x00 -#define GB_SVC_SMALL_DE_EMPHASIS 0x01 -#define GB_SVC_LARGE_DE_EMPHASIS 0x02 - -#define GB_SVC_PWRM_RXTERMINATION 0x01 -#define GB_SVC_PWRM_TXTERMINATION 0x02 -#define GB_SVC_PWRM_LINE_RESET 0x04 -#define GB_SVC_PWRM_SCRAMBLING 0x20 - -#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001 - -#define GB_SVC_UNIPRO_HS_SERIES_A 0x01 -#define GB_SVC_UNIPRO_HS_SERIES_B 0x02 - -#define GB_SVC_SETPWRM_PWR_OK 0x00 -#define GB_SVC_SETPWRM_PWR_LOCAL 0x01 -#define GB_SVC_SETPWRM_PWR_REMOTE 0x02 -#define GB_SVC_SETPWRM_PWR_BUSY 0x03 -#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04 -#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05 - -struct gb_svc_l2_timer_cfg { - __le16 tsb_fc0_protection_timeout; - __le16 tsb_tc0_replay_timeout; - __le16 tsb_afc0_req_timeout; - __le16 tsb_fc1_protection_timeout; - __le16 tsb_tc1_replay_timeout; - __le16 tsb_afc1_req_timeout; - __le16 reserved_for_tc2[3]; - 
__le16 reserved_for_tc3[3]; -} __packed; - -struct gb_svc_intf_set_pwrm_request { - __u8 intf_id; - __u8 hs_series; - __u8 tx_mode; - __u8 tx_gear; - __u8 tx_nlanes; - __u8 tx_amplitude; - __u8 tx_hs_equalizer; - __u8 rx_mode; - __u8 rx_gear; - __u8 rx_nlanes; - __u8 flags; - __le32 quirks; - struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata; -} __packed; - -struct gb_svc_intf_set_pwrm_response { - __u8 result_code; -} __packed; - -struct gb_svc_key_event_request { - __le16 key_code; -#define GB_KEYCODE_ARA 0x00 - - __u8 key_event; -#define GB_SVC_KEY_RELEASED 0x00 -#define GB_SVC_KEY_PRESSED 0x01 -} __packed; - -#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254 - -struct gb_svc_pwrmon_rail_count_get_response { - __u8 rail_count; -} __packed; - -#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32 - -struct gb_svc_pwrmon_rail_names_get_response { - __u8 status; - __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; -} __packed; - -#define GB_SVC_PWRMON_TYPE_CURR 0x01 -#define GB_SVC_PWRMON_TYPE_VOL 0x02 -#define GB_SVC_PWRMON_TYPE_PWR 0x03 - -#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00 -#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01 -#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02 -#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03 - -struct gb_svc_pwrmon_sample_get_request { - __u8 rail_id; - __u8 measurement_type; -} __packed; - -struct gb_svc_pwrmon_sample_get_response { - __u8 result; - __le32 measurement; -} __packed; - -struct gb_svc_pwrmon_intf_sample_get_request { - __u8 intf_id; - __u8 measurement_type; -} __packed; - -struct gb_svc_pwrmon_intf_sample_get_response { - __u8 result; - __le32 measurement; -} __packed; - -#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001 - -struct gb_svc_module_inserted_request { - __u8 primary_intf_id; - __u8 intf_count; - __le16 flags; -} __packed; -/* module_inserted response has no payload */ - -struct gb_svc_module_removed_request { - __u8 primary_intf_id; -} __packed; -/* module_removed response has no payload */ - -struct gb_svc_intf_activate_request { - __u8 intf_id; -} __packed; - -#define GB_SVC_INTF_TYPE_UNKNOWN 0x00 -#define GB_SVC_INTF_TYPE_DUMMY 0x01 -#define GB_SVC_INTF_TYPE_UNIPRO 0x02 -#define GB_SVC_INTF_TYPE_GREYBUS 0x03 - -struct gb_svc_intf_activate_response { - __u8 status; - __u8 intf_type; -} __packed; - -struct gb_svc_intf_resume_request { - __u8 intf_id; -} __packed; - -struct gb_svc_intf_resume_response { - __u8 status; -} __packed; - -#define GB_SVC_INTF_MAILBOX_NONE 0x00 -#define GB_SVC_INTF_MAILBOX_AP 0x01 -#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02 - -struct gb_svc_intf_mailbox_event_request { - __u8 intf_id; - __le16 result_code; - __le32 mailbox; -} __packed; -/* intf_mailbox_event response has no payload */ - -struct gb_svc_intf_oops_request { - __u8 intf_id; - __u8 reason; -} __packed; -/* intf_oops response has no payload */ - - -/* RAW */ - -/* Greybus raw request types */ -#define GB_RAW_TYPE_SEND 0x02 - -struct gb_raw_send_request { - __le32 len; - __u8 data[0]; -} __packed; - - -/* UART */ - -/* Greybus UART operation types */ -#define GB_UART_TYPE_SEND_DATA 0x02 -#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */ -#define GB_UART_TYPE_SET_LINE_CODING 0x04 -#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05 -#define GB_UART_TYPE_SEND_BREAK 0x06 -#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */ -#define GB_UART_TYPE_RECEIVE_CREDITS 0x08 -#define GB_UART_TYPE_FLUSH_FIFOS 0x09 - -/* Represents data from AP -> Module */ -struct gb_uart_send_data_request { - __le16 size; - __u8 data[0]; -} __packed; - -/* 
recv-data-request flags */ -#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */ -#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */ -#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */ -#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */ - -/* Represents data from Module -> AP */ -struct gb_uart_recv_data_request { - __le16 size; - __u8 flags; - __u8 data[0]; -} __packed; - -struct gb_uart_receive_credits_request { - __le16 count; -} __packed; - -struct gb_uart_set_line_coding_request { - __le32 rate; - __u8 format; -#define GB_SERIAL_1_STOP_BITS 0 -#define GB_SERIAL_1_5_STOP_BITS 1 -#define GB_SERIAL_2_STOP_BITS 2 - - __u8 parity; -#define GB_SERIAL_NO_PARITY 0 -#define GB_SERIAL_ODD_PARITY 1 -#define GB_SERIAL_EVEN_PARITY 2 -#define GB_SERIAL_MARK_PARITY 3 -#define GB_SERIAL_SPACE_PARITY 4 - - __u8 data_bits; - - __u8 flow_control; -#define GB_SERIAL_AUTO_RTSCTS_EN 0x1 -} __packed; - -/* output control lines */ -#define GB_UART_CTRL_DTR 0x01 -#define GB_UART_CTRL_RTS 0x02 - -struct gb_uart_set_control_line_state_request { - __u8 control; -} __packed; - -struct gb_uart_set_break_request { - __u8 state; -} __packed; - -/* input control lines and line errors */ -#define GB_UART_CTRL_DCD 0x01 -#define GB_UART_CTRL_DSR 0x02 -#define GB_UART_CTRL_RI 0x04 - -struct gb_uart_serial_state_request { - __u8 control; -} __packed; - -struct gb_uart_serial_flush_request { - __u8 flags; -#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01 -#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02 -} __packed; - -/* Loopback */ - -/* Greybus loopback request types */ -#define GB_LOOPBACK_TYPE_PING 0x02 -#define GB_LOOPBACK_TYPE_TRANSFER 0x03 -#define GB_LOOPBACK_TYPE_SINK 0x04 - -/* - * Loopback request/response header format should be identical - * to simplify bandwidth and data movement analysis. 
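To make the point above concrete (a sketch, not part of the patch): because the request and response defined below share one layout, the symmetry can be pinned down at build time. The function name here is invented; such a check could live anywhere compiled with the loopback driver:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void gb_loopback_assert_layout(void)
{
	/* Request and response must stay byte-for-byte compatible. */
	BUILD_BUG_ON(sizeof(struct gb_loopback_transfer_request) !=
		     sizeof(struct gb_loopback_transfer_response));
	BUILD_BUG_ON(offsetof(struct gb_loopback_transfer_request, data) !=
		     offsetof(struct gb_loopback_transfer_response, data));
}
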
- */ -struct gb_loopback_transfer_request { - __le32 len; - __le32 reserved0; - __le32 reserved1; - __u8 data[0]; -} __packed; - -struct gb_loopback_transfer_response { - __le32 len; - __le32 reserved0; - __le32 reserved1; - __u8 data[0]; -} __packed; - -/* SDIO */ -/* Greybus SDIO operation types */ -#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02 -#define GB_SDIO_TYPE_SET_IOS 0x03 -#define GB_SDIO_TYPE_COMMAND 0x04 -#define GB_SDIO_TYPE_TRANSFER 0x05 -#define GB_SDIO_TYPE_EVENT 0x06 - -/* get caps response: request has no payload */ -struct gb_sdio_get_caps_response { - __le32 caps; -#define GB_SDIO_CAP_NONREMOVABLE 0x00000001 -#define GB_SDIO_CAP_4_BIT_DATA 0x00000002 -#define GB_SDIO_CAP_8_BIT_DATA 0x00000004 -#define GB_SDIO_CAP_MMC_HS 0x00000008 -#define GB_SDIO_CAP_SD_HS 0x00000010 -#define GB_SDIO_CAP_ERASE 0x00000020 -#define GB_SDIO_CAP_1_2V_DDR 0x00000040 -#define GB_SDIO_CAP_1_8V_DDR 0x00000080 -#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100 -#define GB_SDIO_CAP_UHS_SDR12 0x00000200 -#define GB_SDIO_CAP_UHS_SDR25 0x00000400 -#define GB_SDIO_CAP_UHS_SDR50 0x00000800 -#define GB_SDIO_CAP_UHS_SDR104 0x00001000 -#define GB_SDIO_CAP_UHS_DDR50 0x00002000 -#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000 -#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000 -#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000 -#define GB_SDIO_CAP_HS200_1_2V 0x00020000 -#define GB_SDIO_CAP_HS200_1_8V 0x00040000 -#define GB_SDIO_CAP_HS400_1_2V 0x00080000 -#define GB_SDIO_CAP_HS400_1_8V 0x00100000 - - /* see possible values below at vdd */ - __le32 ocr; - __le32 f_min; - __le32 f_max; - __le16 max_blk_count; - __le16 max_blk_size; -} __packed; - -/* set ios request: response has no payload */ -struct gb_sdio_set_ios_request { - __le32 clock; - __le32 vdd; -#define GB_SDIO_VDD_165_195 0x00000001 -#define GB_SDIO_VDD_20_21 0x00000002 -#define GB_SDIO_VDD_21_22 0x00000004 -#define GB_SDIO_VDD_22_23 0x00000008 -#define GB_SDIO_VDD_23_24 0x00000010 -#define GB_SDIO_VDD_24_25 0x00000020 -#define GB_SDIO_VDD_25_26 0x00000040 -#define GB_SDIO_VDD_26_27 0x00000080 -#define GB_SDIO_VDD_27_28 0x00000100 -#define GB_SDIO_VDD_28_29 0x00000200 -#define GB_SDIO_VDD_29_30 0x00000400 -#define GB_SDIO_VDD_30_31 0x00000800 -#define GB_SDIO_VDD_31_32 0x00001000 -#define GB_SDIO_VDD_32_33 0x00002000 -#define GB_SDIO_VDD_33_34 0x00004000 -#define GB_SDIO_VDD_34_35 0x00008000 -#define GB_SDIO_VDD_35_36 0x00010000 - - __u8 bus_mode; -#define GB_SDIO_BUSMODE_OPENDRAIN 0x00 -#define GB_SDIO_BUSMODE_PUSHPULL 0x01 - - __u8 power_mode; -#define GB_SDIO_POWER_OFF 0x00 -#define GB_SDIO_POWER_UP 0x01 -#define GB_SDIO_POWER_ON 0x02 -#define GB_SDIO_POWER_UNDEFINED 0x03 - - __u8 bus_width; -#define GB_SDIO_BUS_WIDTH_1 0x00 -#define GB_SDIO_BUS_WIDTH_4 0x02 -#define GB_SDIO_BUS_WIDTH_8 0x03 - - __u8 timing; -#define GB_SDIO_TIMING_LEGACY 0x00 -#define GB_SDIO_TIMING_MMC_HS 0x01 -#define GB_SDIO_TIMING_SD_HS 0x02 -#define GB_SDIO_TIMING_UHS_SDR12 0x03 -#define GB_SDIO_TIMING_UHS_SDR25 0x04 -#define GB_SDIO_TIMING_UHS_SDR50 0x05 -#define GB_SDIO_TIMING_UHS_SDR104 0x06 -#define GB_SDIO_TIMING_UHS_DDR50 0x07 -#define GB_SDIO_TIMING_MMC_DDR52 0x08 -#define GB_SDIO_TIMING_MMC_HS200 0x09 -#define GB_SDIO_TIMING_MMC_HS400 0x0A - - __u8 signal_voltage; -#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00 -#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01 -#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02 - - __u8 drv_type; -#define GB_SDIO_SET_DRIVER_TYPE_B 0x00 -#define GB_SDIO_SET_DRIVER_TYPE_A 0x01 -#define GB_SDIO_SET_DRIVER_TYPE_C 0x02 -#define GB_SDIO_SET_DRIVER_TYPE_D 0x03 -} __packed; - -/* 
command request */ -struct gb_sdio_command_request { - __u8 cmd; - __u8 cmd_flags; -#define GB_SDIO_RSP_NONE 0x00 -#define GB_SDIO_RSP_PRESENT 0x01 -#define GB_SDIO_RSP_136 0x02 -#define GB_SDIO_RSP_CRC 0x04 -#define GB_SDIO_RSP_BUSY 0x08 -#define GB_SDIO_RSP_OPCODE 0x10 - - __u8 cmd_type; -#define GB_SDIO_CMD_AC 0x00 -#define GB_SDIO_CMD_ADTC 0x01 -#define GB_SDIO_CMD_BC 0x02 -#define GB_SDIO_CMD_BCR 0x03 - - __le32 cmd_arg; - __le16 data_blocks; - __le16 data_blksz; -} __packed; - -struct gb_sdio_command_response { - __le32 resp[4]; -} __packed; - -/* transfer request */ -struct gb_sdio_transfer_request { - __u8 data_flags; -#define GB_SDIO_DATA_WRITE 0x01 -#define GB_SDIO_DATA_READ 0x02 -#define GB_SDIO_DATA_STREAM 0x04 - - __le16 data_blocks; - __le16 data_blksz; - __u8 data[0]; -} __packed; - -struct gb_sdio_transfer_response { - __le16 data_blocks; - __le16 data_blksz; - __u8 data[0]; -} __packed; - -/* event request: generated by module and is defined as unidirectional */ -struct gb_sdio_event_request { - __u8 event; -#define GB_SDIO_CARD_INSERTED 0x01 -#define GB_SDIO_CARD_REMOVED 0x02 -#define GB_SDIO_WP 0x04 -} __packed; - -/* Camera */ - -/* Greybus Camera request types */ -#define GB_CAMERA_TYPE_CAPABILITIES 0x02 -#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03 -#define GB_CAMERA_TYPE_CAPTURE 0x04 -#define GB_CAMERA_TYPE_FLUSH 0x05 -#define GB_CAMERA_TYPE_METADATA 0x06 - -#define GB_CAMERA_MAX_STREAMS 4 -#define GB_CAMERA_MAX_SETTINGS_SIZE 8192 - -/* Greybus Camera Configure Streams request payload */ -struct gb_camera_stream_config_request { - __le16 width; - __le16 height; - __le16 format; - __le16 padding; -} __packed; - -struct gb_camera_configure_streams_request { - __u8 num_streams; - __u8 flags; -#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 - __le16 padding; - struct gb_camera_stream_config_request config[0]; -} __packed; - -/* Greybus Camera Configure Streams response payload */ -struct gb_camera_stream_config_response { - __le16 width; - __le16 height; - __le16 format; - __u8 virtual_channel; - __u8 data_type[2]; - __le16 max_pkt_size; - __u8 padding; - __le32 max_size; -} __packed; - -struct gb_camera_configure_streams_response { - __u8 num_streams; -#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01 - __u8 flags; - __u8 padding[2]; - __le32 data_rate; - struct gb_camera_stream_config_response config[0]; -}; - -/* Greybus Camera Capture request payload - response has no payload */ -struct gb_camera_capture_request { - __le32 request_id; - __u8 streams; - __u8 padding; - __le16 num_frames; - __u8 settings[0]; -} __packed; - -/* Greybus Camera Flush response payload - request has no payload */ -struct gb_camera_flush_response { - __le32 request_id; -} __packed; - -/* Greybus Camera Metadata request payload - operation has no response */ -struct gb_camera_metadata_request { - __le32 request_id; - __le16 frame_number; - __u8 stream; - __u8 padding; - __u8 metadata[0]; -} __packed; - -/* Lights */ - -/* Greybus Lights request types */ -#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02 -#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03 -#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04 -#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05 -#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06 -#define GB_LIGHTS_TYPE_SET_BLINK 0x07 -#define GB_LIGHTS_TYPE_SET_COLOR 0x08 -#define GB_LIGHTS_TYPE_SET_FADE 0x09 -#define GB_LIGHTS_TYPE_EVENT 0x0A -#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B -#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C -#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D -#define 
GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
-
-/* Greybus Light modes */
-
-/*
- * if you add any specific mode below, update also the
- * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
- */
-#define GB_CHANNEL_MODE_NONE 0x00000000
-#define GB_CHANNEL_MODE_BATTERY 0x00000001
-#define GB_CHANNEL_MODE_POWER 0x00000002
-#define GB_CHANNEL_MODE_WIRELESS 0x00000004
-#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
-#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
-#define GB_CHANNEL_MODE_BUTTONS 0x00000020
-#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
-#define GB_CHANNEL_MODE_ATTENTION 0x00000080
-#define GB_CHANNEL_MODE_FLASH 0x00000100
-#define GB_CHANNEL_MODE_TORCH 0x00000200
-#define GB_CHANNEL_MODE_INDICATOR 0x00000400
-
-/* Lights Mode valid bit values */
-#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
-#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
-
-/* Greybus Light Channels Flags */
-#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
-#define GB_LIGHT_CHANNEL_FADER 0x00000002
-#define GB_LIGHT_CHANNEL_BLINK 0x00000004
-
-/* get count of lights in module */
-struct gb_lights_get_lights_response {
- __u8 lights_count;
-} __packed;
-
-/* light config request payload */
-struct gb_lights_get_light_config_request {
- __u8 id;
-} __packed;
-
-/* light config response payload */
-struct gb_lights_get_light_config_response {
- __u8 channel_count;
- __u8 name[32];
-} __packed;
-
-/* channel config request payload */
-struct gb_lights_get_channel_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel flash config request payload */
-struct gb_lights_get_channel_flash_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel config response payload */
-struct gb_lights_get_channel_config_response {
- __u8 max_brightness;
- __le32 flags;
- __le32 color;
- __u8 color_name[32];
- __le32 mode;
- __u8 mode_name[32];
-} __packed;
-
-/* channel flash config response payload */
-struct gb_lights_get_channel_flash_config_response {
- __le32 intensity_min_uA;
- __le32 intensity_max_uA;
- __le32 intensity_step_uA;
- __le32 timeout_min_us;
- __le32 timeout_max_us;
- __le32 timeout_step_us;
-} __packed;
-
-/* blink request payload: response has no payload */
-struct gb_lights_blink_request {
- __u8 light_id;
- __u8 channel_id;
- __le16 time_on_ms;
- __le16 time_off_ms;
-} __packed;
-
-/* set brightness request payload: response has no payload */
-struct gb_lights_set_brightness_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 brightness;
-} __packed;
-
-/* set color request payload: response has no payload */
-struct gb_lights_set_color_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 color;
-} __packed;
-
-/* set fade request payload: response has no payload */
-struct gb_lights_set_fade_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 fade_in;
- __u8 fade_out;
-} __packed;
-
-/* event request: generated by module */
-struct gb_lights_event_request {
- __u8 light_id;
- __u8 event;
-#define GB_LIGHTS_LIGHT_CONFIG 0x01
-} __packed;
-
-/* set flash intensity request payload: response has no payload */
-struct gb_lights_set_flash_intensity_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 intensity_uA;
-} __packed;
-
-/* set flash strobe state request payload: response has no payload */
-struct gb_lights_set_flash_strobe_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 state;
-} __packed;
-
-/* set flash timeout request payload: response has no payload */
-struct gb_lights_set_flash_timeout_request {
- __u8 light_id;
- __u8 channel_id;
- __le32
timeout_us; -} __packed; - -/* get flash fault request payload */ -struct gb_lights_get_flash_fault_request { - __u8 light_id; - __u8 channel_id; -} __packed; - -/* get flash fault response payload */ -struct gb_lights_get_flash_fault_response { - __le32 fault; -#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000 -#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001 -#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002 -#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004 -#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008 -#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010 -#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020 -#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040 -#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080 -} __packed; - -/* Audio */ - -#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02 -#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03 -#define GB_AUDIO_TYPE_GET_CONTROL 0x04 -#define GB_AUDIO_TYPE_SET_CONTROL 0x05 -#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06 -#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07 -#define GB_AUDIO_TYPE_GET_PCM 0x08 -#define GB_AUDIO_TYPE_SET_PCM 0x09 -#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a - /* 0x0b unused */ -#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c -#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d -#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e - /* 0x0f unused */ -#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10 -#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11 -#define GB_AUDIO_TYPE_JACK_EVENT 0x12 -#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13 -#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14 -#define GB_AUDIO_TYPE_SEND_DATA 0x15 - -/* Module must be able to buffer 10ms of audio data, minimum */ -#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000 - -#define GB_AUDIO_PCM_NAME_MAX 32 -#define AUDIO_DAI_NAME_MAX 32 -#define AUDIO_CONTROL_NAME_MAX 32 -#define AUDIO_CTL_ELEM_NAME_MAX 44 -#define AUDIO_ENUM_NAME_MAX 64 -#define AUDIO_WIDGET_NAME_MAX 32 - -/* See SNDRV_PCM_FMTBIT_* in Linux source */ -#define GB_AUDIO_PCM_FMT_S8 BIT(0) -#define GB_AUDIO_PCM_FMT_U8 BIT(1) -#define GB_AUDIO_PCM_FMT_S16_LE BIT(2) -#define GB_AUDIO_PCM_FMT_S16_BE BIT(3) -#define GB_AUDIO_PCM_FMT_U16_LE BIT(4) -#define GB_AUDIO_PCM_FMT_U16_BE BIT(5) -#define GB_AUDIO_PCM_FMT_S24_LE BIT(6) -#define GB_AUDIO_PCM_FMT_S24_BE BIT(7) -#define GB_AUDIO_PCM_FMT_U24_LE BIT(8) -#define GB_AUDIO_PCM_FMT_U24_BE BIT(9) -#define GB_AUDIO_PCM_FMT_S32_LE BIT(10) -#define GB_AUDIO_PCM_FMT_S32_BE BIT(11) -#define GB_AUDIO_PCM_FMT_U32_LE BIT(12) -#define GB_AUDIO_PCM_FMT_U32_BE BIT(13) - -/* See SNDRV_PCM_RATE_* in Linux source */ -#define GB_AUDIO_PCM_RATE_5512 BIT(0) -#define GB_AUDIO_PCM_RATE_8000 BIT(1) -#define GB_AUDIO_PCM_RATE_11025 BIT(2) -#define GB_AUDIO_PCM_RATE_16000 BIT(3) -#define GB_AUDIO_PCM_RATE_22050 BIT(4) -#define GB_AUDIO_PCM_RATE_32000 BIT(5) -#define GB_AUDIO_PCM_RATE_44100 BIT(6) -#define GB_AUDIO_PCM_RATE_48000 BIT(7) -#define GB_AUDIO_PCM_RATE_64000 BIT(8) -#define GB_AUDIO_PCM_RATE_88200 BIT(9) -#define GB_AUDIO_PCM_RATE_96000 BIT(10) -#define GB_AUDIO_PCM_RATE_176400 BIT(11) -#define GB_AUDIO_PCM_RATE_192000 BIT(12) - -#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1 -#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2 - -#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0) -#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1) - -/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */ -#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01 -#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02 -#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03 -#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06 - -/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */ -#define 
GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00 -#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01 -#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02 -#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03 -#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04 -#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05 -#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06 - -/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */ -#define GB_AUDIO_ACCESS_READ BIT(0) -#define GB_AUDIO_ACCESS_WRITE BIT(1) -#define GB_AUDIO_ACCESS_VOLATILE BIT(2) -#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3) -#define GB_AUDIO_ACCESS_TLV_READ BIT(4) -#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5) -#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6) -#define GB_AUDIO_ACCESS_INACTIVE BIT(7) -#define GB_AUDIO_ACCESS_LOCK BIT(8) -#define GB_AUDIO_ACCESS_OWNER BIT(9) - -/* enum snd_soc_dapm_type */ -#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0 -#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1 -#define GB_AUDIO_WIDGET_TYPE_MUX 0x2 -#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3 -#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4 -#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5 -#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6 -#define GB_AUDIO_WIDGET_TYPE_PGA 0x7 -#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8 -#define GB_AUDIO_WIDGET_TYPE_ADC 0x9 -#define GB_AUDIO_WIDGET_TYPE_DAC 0xa -#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb -#define GB_AUDIO_WIDGET_TYPE_MIC 0xc -#define GB_AUDIO_WIDGET_TYPE_HP 0xd -#define GB_AUDIO_WIDGET_TYPE_SPK 0xe -#define GB_AUDIO_WIDGET_TYPE_LINE 0xf -#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10 -#define GB_AUDIO_WIDGET_TYPE_VMID 0x11 -#define GB_AUDIO_WIDGET_TYPE_PRE 0x12 -#define GB_AUDIO_WIDGET_TYPE_POST 0x13 -#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14 -#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15 -#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16 -#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17 -#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18 -#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19 -#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a -#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b -#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c - -#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01 -#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02 - -#define GB_AUDIO_JACK_EVENT_INSERTION 0x1 -#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2 - -#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1 -#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2 - -#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1 -#define GB_AUDIO_STREAMING_EVENT_HALT 0x2 -#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3 -#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4 -#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5 -#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6 -#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7 -#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8 -#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9 - -#define GB_AUDIO_INVALID_INDEX 0xff - -/* enum snd_jack_types */ -#define GB_AUDIO_JACK_HEADPHONE 0x0000001 -#define GB_AUDIO_JACK_MICROPHONE 0x0000002 -#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \ - GB_AUDIO_JACK_MICROPHONE) -#define GB_AUDIO_JACK_LINEOUT 0x0000004 -#define GB_AUDIO_JACK_MECHANICAL 0x0000008 -#define GB_AUDIO_JACK_VIDEOOUT 0x0000010 -#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \ - GB_AUDIO_JACK_VIDEOOUT) -#define GB_AUDIO_JACK_LINEIN 0x0000020 -#define GB_AUDIO_JACK_OC_HPHL 0x0000040 -#define GB_AUDIO_JACK_OC_HPHR 0x0000080 -#define GB_AUDIO_JACK_MICROPHONE2 0x0000200 -#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \ - GB_AUDIO_JACK_MICROPHONE | \ - GB_AUDIO_JACK_MICROPHONE2) -/* Kept separate from switches to facilitate implementation */ -#define GB_AUDIO_JACK_BTN_0 0x4000000 -#define 
GB_AUDIO_JACK_BTN_1 0x2000000 -#define GB_AUDIO_JACK_BTN_2 0x1000000 -#define GB_AUDIO_JACK_BTN_3 0x0800000 - -struct gb_audio_pcm { - __u8 stream_name[GB_AUDIO_PCM_NAME_MAX]; - __le32 formats; /* GB_AUDIO_PCM_FMT_* */ - __le32 rates; /* GB_AUDIO_PCM_RATE_* */ - __u8 chan_min; - __u8 chan_max; - __u8 sig_bits; /* number of bits of content */ -} __packed; - -struct gb_audio_dai { - __u8 name[AUDIO_DAI_NAME_MAX]; - __le16 data_cport; - struct gb_audio_pcm capture; - struct gb_audio_pcm playback; -} __packed; - -struct gb_audio_integer { - __le32 min; - __le32 max; - __le32 step; -} __packed; - -struct gb_audio_integer64 { - __le64 min; - __le64 max; - __le64 step; -} __packed; - -struct gb_audio_enumerated { - __le32 items; - __le16 names_length; - __u8 names[0]; -} __packed; - -struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ - __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */ - __le16 dimen[4]; - union { - struct gb_audio_integer integer; - struct gb_audio_integer64 integer64; - struct gb_audio_enumerated enumerated; - } value; -} __packed; - -struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */ - __le64 timestamp; /* XXX needed? */ - union { - __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */ - __le64 integer64_value[2]; - __le32 enumerated_item[2]; - } value; -} __packed; - -struct gb_audio_control { - __u8 name[AUDIO_CONTROL_NAME_MAX]; - __u8 id; /* 0-63 */ - __u8 iface; /* GB_AUDIO_IFACE_* */ - __le16 data_cport; - __le32 access; /* GB_AUDIO_ACCESS_* */ - __u8 count; /* count of same elements */ - __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */ - struct gb_audio_ctl_elem_info info; -} __packed; - -struct gb_audio_widget { - __u8 name[AUDIO_WIDGET_NAME_MAX]; - __u8 sname[AUDIO_WIDGET_NAME_MAX]; - __u8 id; - __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ - __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ - __u8 ncontrols; - struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ -} __packed; - -struct gb_audio_route { - __u8 source_id; /* widget id */ - __u8 destination_id; /* widget id */ - __u8 control_id; /* 0-63 */ - __u8 index; /* Selection within the control */ -} __packed; - -struct gb_audio_topology { - __u8 num_dais; - __u8 num_controls; - __u8 num_widgets; - __u8 num_routes; - __le32 size_dais; - __le32 size_controls; - __le32 size_widgets; - __le32 size_routes; - __le32 jack_type; - /* - * struct gb_audio_dai dai[num_dais]; - * struct gb_audio_control controls[num_controls]; - * struct gb_audio_widget widgets[num_widgets]; - * struct gb_audio_route routes[num_routes]; - */ - __u8 data[0]; -} __packed; - -struct gb_audio_get_topology_size_response { - __le16 size; -} __packed; - -struct gb_audio_get_topology_response { - struct gb_audio_topology topology; -} __packed; - -struct gb_audio_get_control_request { - __u8 control_id; - __u8 index; -} __packed; - -struct gb_audio_get_control_response { - struct gb_audio_ctl_elem_value value; -} __packed; - -struct gb_audio_set_control_request { - __u8 control_id; - __u8 index; - struct gb_audio_ctl_elem_value value; -} __packed; - -struct gb_audio_enable_widget_request { - __u8 widget_id; -} __packed; - -struct gb_audio_disable_widget_request { - __u8 widget_id; -} __packed; - -struct gb_audio_get_pcm_request { - __le16 data_cport; -} __packed; - -struct gb_audio_get_pcm_response { - __le32 format; - __le32 rate; - __u8 channels; - __u8 sig_bits; -} __packed; - -struct gb_audio_set_pcm_request { - __le16 data_cport; - __le32 format; - __le32 rate; - __u8 channels; - __u8 sig_bits; 
-} __packed; - -struct gb_audio_set_tx_data_size_request { - __le16 data_cport; - __le16 size; -} __packed; - -struct gb_audio_activate_tx_request { - __le16 data_cport; -} __packed; - -struct gb_audio_deactivate_tx_request { - __le16 data_cport; -} __packed; - -struct gb_audio_set_rx_data_size_request { - __le16 data_cport; - __le16 size; -} __packed; - -struct gb_audio_activate_rx_request { - __le16 data_cport; -} __packed; - -struct gb_audio_deactivate_rx_request { - __le16 data_cport; -} __packed; - -struct gb_audio_jack_event_request { - __u8 widget_id; - __u8 jack_attribute; - __u8 event; -} __packed; - -struct gb_audio_button_event_request { - __u8 widget_id; - __u8 button_id; - __u8 event; -} __packed; - -struct gb_audio_streaming_event_request { - __le16 data_cport; - __u8 event; -} __packed; - -struct gb_audio_send_data_request { - __le64 timestamp; - __u8 data[0]; -} __packed; - - -/* Log */ - -/* operations */ -#define GB_LOG_TYPE_SEND_LOG 0x02 - -/* length */ -#define GB_LOG_MAX_LEN 1024 - -struct gb_log_send_log_request { - __le16 len; - __u8 msg[0]; -} __packed; - -#endif /* __GREYBUS_PROTOCOLS_H */ - diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c index e2b9ab5f6ec2..72b21bf2d7d3 100644 --- a/drivers/staging/greybus/hd.c +++ b/drivers/staging/greybus/hd.c @@ -8,8 +8,8 @@ #include #include +#include -#include "greybus.h" #include "greybus_trace.h" EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create); diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h deleted file mode 100644 index 348b76fabc9a..000000000000 --- a/drivers/staging/greybus/hd.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus Host Device - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. 
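A short illustration, not part of the patch: the Log protocol that closes the deleted header above is typical of how an incoming Greybus request gets sanity-checked before use. A hypothetical validator (the name and exact policy are assumptions; the real handling lives in drivers/staging/greybus/log.c):

#include <linux/errno.h>

static int gb_log_request_valid(const struct gb_log_send_log_request *request,
				size_t payload_size)
{
	u16 len;

	if (payload_size < sizeof(*request))
		return -EINVAL;

	len = le16_to_cpu(request->len);
	if (len > GB_LOG_MAX_LEN || len > payload_size - sizeof(*request))
		return -EINVAL;

	return 0;
}
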
- */ - -#ifndef __HD_H -#define __HD_H - -struct gb_host_device; -struct gb_message; - -struct gb_hd_driver { - size_t hd_priv_size; - - int (*cport_allocate)(struct gb_host_device *hd, int cport_id, - unsigned long flags); - void (*cport_release)(struct gb_host_device *hd, u16 cport_id); - int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, - unsigned long flags); - int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); - int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); - int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); - int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, - u8 phase, unsigned int timeout); - int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, - size_t peer_space, unsigned int timeout); - int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); - - int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, - struct gb_message *message, gfp_t gfp_mask); - void (*message_cancel)(struct gb_message *message); - int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); - int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); - int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, - bool async); -}; - -struct gb_host_device { - struct device dev; - int bus_id; - const struct gb_hd_driver *driver; - - struct list_head modules; - struct list_head connections; - struct ida cport_id_map; - - /* Number of CPorts supported by the UniPro IP */ - size_t num_cports; - - /* Host device buffer constraints */ - size_t buffer_size_max; - - struct gb_svc *svc; - /* Private data for the host driver */ - unsigned long hd_priv[0] __aligned(sizeof(s64)); -}; -#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) - -int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); -void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); -int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, - unsigned long flags); -void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); - -struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, - struct device *parent, - size_t buffer_size_max, - size_t num_cports); -int gb_hd_add(struct gb_host_device *hd); -void gb_hd_del(struct gb_host_device *hd); -void gb_hd_shutdown(struct gb_host_device *hd); -void gb_hd_put(struct gb_host_device *hd); -int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, - bool in_irq); - -int gb_hd_init(void); -void gb_hd_exit(void); - -#endif /* __HD_H */ diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c index 8ab810bf5716..04bfd9110502 100644 --- a/drivers/staging/greybus/hid.c +++ b/drivers/staging/greybus/hid.c @@ -12,8 +12,7 @@ #include #include #include - -#include "greybus.h" +#include /* Greybus HID device's structure */ struct gb_hid { diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c index b2522043a1a4..ab06fc3b9e7e 100644 --- a/drivers/staging/greybus/i2c.c +++ b/drivers/staging/greybus/i2c.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" struct gb_i2c_device { diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c index d7b5b89a2f40..67dbe6fda9a1 100644 --- a/drivers/staging/greybus/interface.c +++ b/drivers/staging/greybus/interface.c @@ -7,8 +7,8 @@ */ #include +#include -#include "greybus.h" #include "greybus_trace.h" #define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000 diff --git 
a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h deleted file mode 100644 index 8fb1eacda302..000000000000 --- a/drivers/staging/greybus/interface.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus Interface Block code - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. - */ - -#ifndef __INTERFACE_H -#define __INTERFACE_H - -enum gb_interface_type { - GB_INTERFACE_TYPE_INVALID = 0, - GB_INTERFACE_TYPE_UNKNOWN, - GB_INTERFACE_TYPE_DUMMY, - GB_INTERFACE_TYPE_UNIPRO, - GB_INTERFACE_TYPE_GREYBUS, -}; - -#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0) -#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) -#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2) -#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) -#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4) -#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5) -#define GB_INTERFACE_QUIRK_NO_PM BIT(6) - -struct gb_interface { - struct device dev; - struct gb_control *control; - - struct list_head bundles; - struct list_head module_node; - struct list_head manifest_descs; - u8 interface_id; /* Physical location within the Endo */ - u8 device_id; - u8 features; /* Feature flags set in the manifest */ - - enum gb_interface_type type; - - u32 ddbl1_manufacturer_id; - u32 ddbl1_product_id; - u32 vendor_id; - u32 product_id; - u64 serial_number; - - struct gb_host_device *hd; - struct gb_module *module; - - unsigned long quirks; - - struct mutex mutex; - - bool disconnected; - - bool ejected; - bool removed; - bool active; - bool enabled; - bool mode_switch; - bool dme_read; - - struct work_struct mode_switch_work; - struct completion mode_switch_completion; -}; -#define to_gb_interface(d) container_of(d, struct gb_interface, dev) - -struct gb_interface *gb_interface_create(struct gb_module *module, - u8 interface_id); -int gb_interface_activate(struct gb_interface *intf); -void gb_interface_deactivate(struct gb_interface *intf); -int gb_interface_enable(struct gb_interface *intf); -void gb_interface_disable(struct gb_interface *intf); -int gb_interface_add(struct gb_interface *intf); -void gb_interface_del(struct gb_interface *intf); -void gb_interface_put(struct gb_interface *intf); -void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, - u32 mailbox); - -int gb_interface_request_mode_switch(struct gb_interface *intf); - -#endif /* __INTERFACE_H */ diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 010ae1e9c7fb..b3b1b253d112 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -11,11 +11,9 @@ #include #include #include +#include #include -#include "greybus.h" -#include "greybus_protocols.h" - #define NAMES_MAX 32 struct gb_channel { diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c index 4f1f161ff11c..971f36dccac6 100644 --- a/drivers/staging/greybus/log.c +++ b/drivers/staging/greybus/log.c @@ -9,8 +9,7 @@ #include #include #include - -#include "greybus.h" +#include struct gb_log { struct gb_connection *connection; diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c index b0ab0eed5c18..583d9708a191 100644 --- a/drivers/staging/greybus/loopback.c +++ b/drivers/staging/greybus/loopback.c @@ -25,12 +25,9 @@ #include #include #include - +#include #include -#include "greybus.h" -#include "connection.h" - #define NSEC_PER_DAY 86400000000000ULL struct gb_loopback_stats { diff --git a/drivers/staging/greybus/manifest.c 
b/drivers/staging/greybus/manifest.c index 4ebbba52b07c..dd7040697bde 100644 --- a/drivers/staging/greybus/manifest.c +++ b/drivers/staging/greybus/manifest.c @@ -6,7 +6,7 @@ * Copyright 2014-2015 Linaro Ltd. */ -#include "greybus.h" +#include static const char *get_descriptor_type_string(u8 type) { diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h deleted file mode 100644 index 88aa7e44cad5..000000000000 --- a/drivers/staging/greybus/manifest.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus manifest parsing - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. - */ - -#ifndef __MANIFEST_H -#define __MANIFEST_H - -struct gb_interface; -bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); - -#endif /* __MANIFEST_H */ diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c index b251a53d0e8e..36f77f9e1d74 100644 --- a/drivers/staging/greybus/module.c +++ b/drivers/staging/greybus/module.c @@ -6,7 +6,7 @@ * Copyright 2016 Linaro Ltd. */ -#include "greybus.h" +#include #include "greybus_trace.h" static ssize_t eject_store(struct device *dev, diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h deleted file mode 100644 index 2a27e520ee94..000000000000 --- a/drivers/staging/greybus/module.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus Module code - * - * Copyright 2016 Google Inc. - * Copyright 2016 Linaro Ltd. - */ - -#ifndef __MODULE_H -#define __MODULE_H - -struct gb_module { - struct device dev; - struct gb_host_device *hd; - - struct list_head hd_node; - - u8 module_id; - size_t num_interfaces; - - bool disconnected; - - struct gb_interface *interfaces[0]; -}; -#define to_gb_module(d) container_of(d, struct gb_module, dev) - -struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, - size_t num_interfaces); -int gb_module_add(struct gb_module *module); -void gb_module_del(struct gb_module *module); -void gb_module_put(struct gb_module *module); - -#endif /* __MODULE_H */ diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c index fe268f7b63ed..8459e9bc0749 100644 --- a/drivers/staging/greybus/operation.c +++ b/drivers/staging/greybus/operation.c @@ -12,8 +12,8 @@ #include #include #include +#include -#include "greybus.h" #include "greybus_trace.h" static struct kmem_cache *gb_operation_cache; diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h deleted file mode 100644 index 17ba3daf111b..000000000000 --- a/drivers/staging/greybus/operation.h +++ /dev/null @@ -1,224 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus operations - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. 
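For illustration only, outside the patch: struct gb_module above ends in a flexible interfaces[] array sized by num_interfaces, so finding an interface is a plain bounded scan. A hypothetical helper (the name is invented; interface_id comes from struct gb_interface shown earlier):

static struct gb_interface *
gb_module_find_interface(struct gb_module *module, u8 intf_id)
{
	size_t i;

	for (i = 0; i < module->num_interfaces; i++) {
		struct gb_interface *intf = module->interfaces[i];

		if (intf->interface_id == intf_id)
			return intf;
	}

	return NULL;
}
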
- */ - -#ifndef __OPERATION_H -#define __OPERATION_H - -#include - -struct gb_operation; - -/* The default amount of time a request is given to complete */ -#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */ - -/* - * The top bit of the type in an operation message header indicates - * whether the message is a request (bit clear) or response (bit set) - */ -#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80) - -enum gb_operation_result { - GB_OP_SUCCESS = 0x00, - GB_OP_INTERRUPTED = 0x01, - GB_OP_TIMEOUT = 0x02, - GB_OP_NO_MEMORY = 0x03, - GB_OP_PROTOCOL_BAD = 0x04, - GB_OP_OVERFLOW = 0x05, - GB_OP_INVALID = 0x06, - GB_OP_RETRY = 0x07, - GB_OP_NONEXISTENT = 0x08, - GB_OP_UNKNOWN_ERROR = 0xfe, - GB_OP_MALFUNCTION = 0xff, -}; - -#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr) -#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX - -/* - * Protocol code should only examine the payload and payload_size fields, and - * host-controller drivers may use the hcpriv field. All other fields are - * intended to be private to the operations core code. - */ -struct gb_message { - struct gb_operation *operation; - struct gb_operation_msg_hdr *header; - - void *payload; - size_t payload_size; - - void *buffer; - - void *hcpriv; -}; - -#define GB_OPERATION_FLAG_INCOMING BIT(0) -#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1) -#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2) -#define GB_OPERATION_FLAG_CORE BIT(3) - -#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \ - GB_OPERATION_FLAG_UNIDIRECTIONAL) - -/* - * A Greybus operation is a remote procedure call performed over a - * connection between two UniPro interfaces. - * - * Every operation consists of a request message sent to the other - * end of the connection coupled with a reply message returned to - * the sender. Every operation has a type, whose interpretation is - * dependent on the protocol associated with the connection. - * - * Only four things in an operation structure are intended to be - * directly usable by protocol handlers: the operation's connection - * pointer; the operation type; the request message payload (and - * size); and the response message payload (and size). Note that a - * message with a 0-byte payload has a null message payload pointer. - * - * In addition, every operation has a result, which is an errno - * value. Protocol handlers access the operation result using - * gb_operation_result(). 
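To make the type-bit convention described above concrete, a sketch that is not part of the patch (both helper names are invented; the operations core performs the same masking internally):

static inline u8 gb_request_type_to_response(u8 type)
{
	/* A response reuses the request type with the top bit set. */
	return type | GB_MESSAGE_TYPE_RESPONSE;
}

static inline bool gb_message_type_is_response(u8 type)
{
	return (type & GB_MESSAGE_TYPE_RESPONSE) != 0;
}
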
- */ -typedef void (*gb_operation_callback)(struct gb_operation *); -struct gb_operation { - struct gb_connection *connection; - struct gb_message *request; - struct gb_message *response; - - unsigned long flags; - u8 type; - u16 id; - int errno; /* Operation result */ - - struct work_struct work; - gb_operation_callback callback; - struct completion completion; - struct timer_list timer; - - struct kref kref; - atomic_t waiters; - - int active; - struct list_head links; /* connection->operations */ - - void *private; -}; - -static inline bool -gb_operation_is_incoming(struct gb_operation *operation) -{ - return operation->flags & GB_OPERATION_FLAG_INCOMING; -} - -static inline bool -gb_operation_is_unidirectional(struct gb_operation *operation) -{ - return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL; -} - -static inline bool -gb_operation_short_response_allowed(struct gb_operation *operation) -{ - return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE; -} - -static inline bool gb_operation_is_core(struct gb_operation *operation) -{ - return operation->flags & GB_OPERATION_FLAG_CORE; -} - -void gb_connection_recv(struct gb_connection *connection, - void *data, size_t size); - -int gb_operation_result(struct gb_operation *operation); - -size_t gb_operation_get_payload_size_max(struct gb_connection *connection); -struct gb_operation * -gb_operation_create_flags(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, unsigned long flags, - gfp_t gfp); - -static inline struct gb_operation * -gb_operation_create(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, gfp_t gfp) -{ - return gb_operation_create_flags(connection, type, request_size, - response_size, 0, gfp); -} - -struct gb_operation * -gb_operation_create_core(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, unsigned long flags, - gfp_t gfp); - -void gb_operation_get(struct gb_operation *operation); -void gb_operation_put(struct gb_operation *operation); - -bool gb_operation_response_alloc(struct gb_operation *operation, - size_t response_size, gfp_t gfp); - -int gb_operation_request_send(struct gb_operation *operation, - gb_operation_callback callback, - unsigned int timeout, - gfp_t gfp); -int gb_operation_request_send_sync_timeout(struct gb_operation *operation, - unsigned int timeout); -static inline int -gb_operation_request_send_sync(struct gb_operation *operation) -{ - return gb_operation_request_send_sync_timeout(operation, - GB_OPERATION_TIMEOUT_DEFAULT); -} - -void gb_operation_cancel(struct gb_operation *operation, int errno); -void gb_operation_cancel_incoming(struct gb_operation *operation, int errno); - -void greybus_message_sent(struct gb_host_device *hd, - struct gb_message *message, int status); - -int gb_operation_sync_timeout(struct gb_connection *connection, int type, - void *request, int request_size, - void *response, int response_size, - unsigned int timeout); -int gb_operation_unidirectional_timeout(struct gb_connection *connection, - int type, void *request, int request_size, - unsigned int timeout); - -static inline int gb_operation_sync(struct gb_connection *connection, int type, - void *request, int request_size, - void *response, int response_size) -{ - return gb_operation_sync_timeout(connection, type, - request, request_size, response, response_size, - GB_OPERATION_TIMEOUT_DEFAULT); -} - -static inline int gb_operation_unidirectional(struct gb_connection *connection, - int type, 
void *request, int request_size) -{ - return gb_operation_unidirectional_timeout(connection, type, - request, request_size, GB_OPERATION_TIMEOUT_DEFAULT); -} - -static inline void *gb_operation_get_data(struct gb_operation *operation) -{ - return operation->private; -} - -static inline void gb_operation_set_data(struct gb_operation *operation, - void *data) -{ - operation->private = data; -} - -int gb_operation_init(void); -void gb_operation_exit(void); - -#endif /* !__OPERATION_H */ diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c index 34b40a409ea3..ec96f28887f9 100644 --- a/drivers/staging/greybus/power_supply.c +++ b/drivers/staging/greybus/power_supply.c @@ -10,8 +10,7 @@ #include #include #include - -#include "greybus.h" +#include #define PROP_MAX 32 diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c index 4a6d394b6c44..891a6a672378 100644 --- a/drivers/staging/greybus/pwm.c +++ b/drivers/staging/greybus/pwm.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" struct gb_pwm_chip { diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c index 838acbe84ca0..64a17dfe3b6e 100644 --- a/drivers/staging/greybus/raw.c +++ b/drivers/staging/greybus/raw.c @@ -13,8 +13,7 @@ #include #include #include - -#include "greybus.h" +#include struct gb_raw { struct gb_connection *connection; diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c index a097a8916b3b..68c5718be827 100644 --- a/drivers/staging/greybus/sdio.c +++ b/drivers/staging/greybus/sdio.c @@ -12,8 +12,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" struct gb_sdio_host { diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c index 47d896992b35..68e8d272db6d 100644 --- a/drivers/staging/greybus/spi.c +++ b/drivers/staging/greybus/spi.c @@ -7,8 +7,8 @@ */ #include +#include -#include "greybus.h" #include "gbphy.h" #include "spilib.h" diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c index 2e07c6b41334..fc27c52de74a 100644 --- a/drivers/staging/greybus/spilib.c +++ b/drivers/staging/greybus/spilib.c @@ -10,9 +10,9 @@ #include #include #include +#include #include -#include "greybus.h" #include "spilib.h" struct gb_spilib { diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c index 05bc45287b87..ce7740ef449b 100644 --- a/drivers/staging/greybus/svc.c +++ b/drivers/staging/greybus/svc.c @@ -8,8 +8,7 @@ #include #include - -#include "greybus.h" +#include #define SVC_INTF_EJECT_TIMEOUT 9000 #define SVC_INTF_ACTIVATE_TIMEOUT 6000 diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h deleted file mode 100644 index e7452057cfe4..000000000000 --- a/drivers/staging/greybus/svc.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus SVC code - * - * Copyright 2015 Google Inc. - * Copyright 2015 Linaro Ltd. 
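Not part of the patch: a typical call site for the gb_operation_sync() wrapper declared above, paired with one of the fixed-layout requests from the deleted greybus_protocols.h. The function name is invented and error handling is left to the caller:

static int gb_example_set_brightness(struct gb_connection *connection,
				     u8 light_id, u8 channel_id,
				     u8 brightness)
{
	struct gb_lights_set_brightness_request request = {
		.light_id	= light_id,
		.channel_id	= channel_id,
		.brightness	= brightness,
	};

	/* This operation defines no response payload. */
	return gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
				 &request, sizeof(request), NULL, 0);
}
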
- */ - -#ifndef __SVC_H -#define __SVC_H - -#define GB_SVC_CPORT_FLAG_E2EFC BIT(0) -#define GB_SVC_CPORT_FLAG_CSD_N BIT(1) -#define GB_SVC_CPORT_FLAG_CSV_N BIT(2) - -enum gb_svc_state { - GB_SVC_STATE_RESET, - GB_SVC_STATE_PROTOCOL_VERSION, - GB_SVC_STATE_SVC_HELLO, -}; - -enum gb_svc_watchdog_bite { - GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0, - GB_SVC_WATCHDOG_BITE_PANIC_KERNEL, -}; - -struct gb_svc_watchdog; - -struct svc_debugfs_pwrmon_rail { - u8 id; - struct gb_svc *svc; -}; - -struct gb_svc { - struct device dev; - - struct gb_host_device *hd; - struct gb_connection *connection; - enum gb_svc_state state; - struct ida device_id_map; - struct workqueue_struct *wq; - - u16 endo_id; - u8 ap_intf_id; - - u8 protocol_major; - u8 protocol_minor; - - struct gb_svc_watchdog *watchdog; - enum gb_svc_watchdog_bite action; - - struct dentry *debugfs_dentry; - struct svc_debugfs_pwrmon_rail *pwrmon_rails; -}; -#define to_gb_svc(d) container_of(d, struct gb_svc, dev) - -struct gb_svc *gb_svc_create(struct gb_host_device *hd); -int gb_svc_add(struct gb_svc *svc); -void gb_svc_del(struct gb_svc *svc); -void gb_svc_put(struct gb_svc *svc); - -int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, - u8 measurement_type, u32 *value); -int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id); -int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, - u8 intf2_id, u8 dev2_id); -void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id); -int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, - u8 intf2_id, u16 cport2_id, u8 cport_flags); -void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, - u8 intf2_id, u16 cport2_id); -int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id); -int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable); -int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable); -int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable); -int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type); -int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id); - -int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, - u32 *value); -int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, - u32 value); -int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, - u8 tx_mode, u8 tx_gear, u8 tx_nlanes, - u8 tx_amplitude, u8 tx_hs_equalizer, - u8 rx_mode, u8 rx_gear, u8 rx_nlanes, - u8 flags, u32 quirks, - struct gb_svc_l2_timer_cfg *local, - struct gb_svc_l2_timer_cfg *remote); -int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id); -int gb_svc_ping(struct gb_svc *svc); -int gb_svc_watchdog_create(struct gb_svc *svc); -void gb_svc_watchdog_destroy(struct gb_svc *svc); -bool gb_svc_watchdog_enabled(struct gb_svc *svc); -int gb_svc_watchdog_enable(struct gb_svc *svc); -int gb_svc_watchdog_disable(struct gb_svc *svc); - -int gb_svc_protocol_init(void); -void gb_svc_protocol_exit(void); - -#endif /* __SVC_H */ diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c index 7868ad8211c5..b6b1682c19c4 100644 --- a/drivers/staging/greybus/svc_watchdog.c +++ b/drivers/staging/greybus/svc_watchdog.c @@ -8,7 +8,7 @@ #include #include #include -#include "greybus.h" +#include #define SVC_WATCHDOG_PERIOD (2 * HZ) diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index b3bffe91ae99..55c51143bb09 100644 --- 
a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -28,8 +28,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" #define GB_NUM_MINORS 16 /* 16 is more than enough */ diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c index 1c246c73a085..8e9d9d59a357 100644 --- a/drivers/staging/greybus/usb.c +++ b/drivers/staging/greybus/usb.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include "greybus.h" #include "gbphy.h" /* Greybus USB request types */ diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c index 3e5dedeacd5c..0e2b188e5ca3 100644 --- a/drivers/staging/greybus/vibrator.c +++ b/drivers/staging/greybus/vibrator.c @@ -13,8 +13,7 @@ #include #include #include - -#include "greybus.h" +#include struct gb_vibrator_device { struct gb_connection *connection; diff --git a/include/linux/greybus.h b/include/linux/greybus.h new file mode 100644 index 000000000000..18c0fb958b74 --- /dev/null +++ b/include/linux/greybus.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus driver and device API + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#ifndef __LINUX_GREYBUS_H +#define __LINUX_GREYBUS_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Matches up with the Greybus Protocol specification document */ +#define GREYBUS_VERSION_MAJOR 0x00 +#define GREYBUS_VERSION_MINOR 0x01 + +#define GREYBUS_ID_MATCH_DEVICE \ + (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT) + +#define GREYBUS_DEVICE(v, p) \ + .match_flags = GREYBUS_ID_MATCH_DEVICE, \ + .vendor = (v), \ + .product = (p), + +#define GREYBUS_DEVICE_CLASS(c) \ + .match_flags = GREYBUS_ID_MATCH_CLASS, \ + .class = (c), + +/* Maximum number of CPorts */ +#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */ +#define CPORT_ID_BAD U16_MAX + +struct greybus_driver { + const char *name; + + int (*probe)(struct gb_bundle *bundle, + const struct greybus_bundle_id *id); + void (*disconnect)(struct gb_bundle *bundle); + + const struct greybus_bundle_id *id_table; + + struct device_driver driver; +}; +#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver) + +static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data) +{ + dev_set_drvdata(&bundle->dev, data); +} + +static inline void *greybus_get_drvdata(struct gb_bundle *bundle) +{ + return dev_get_drvdata(&bundle->dev); +} + +/* Don't call these directly, use the module_greybus_driver() macro instead */ +int greybus_register_driver(struct greybus_driver *driver, + struct module *module, const char *mod_name); +void greybus_deregister_driver(struct greybus_driver *driver); + +/* define to get proper THIS_MODULE and KBUILD_MODNAME values */ +#define greybus_register(driver) \ + greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) +#define greybus_deregister(driver) \ + greybus_deregister_driver(driver) + +/** + * module_greybus_driver() - Helper macro for registering a Greybus driver + * @__greybus_driver: greybus_driver structure + * + * Helper macro for Greybus drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. 
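+ *
+ * A minimal usage sketch (illustrative only; the gb_foo_* names are
+ * hypothetical placeholders):
+ *
+ *	static struct greybus_driver gb_foo_driver = {
+ *		.name		= "foo",
+ *		.probe		= gb_foo_probe,
+ *		.disconnect	= gb_foo_disconnect,
+ *		.id_table	= gb_foo_id_table,
+ *	};
+ *	module_greybus_driver(gb_foo_driver);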
+ */ +#define module_greybus_driver(__greybus_driver) \ + module_driver(__greybus_driver, greybus_register, greybus_deregister) + +int greybus_disabled(void); + +void gb_debugfs_init(void); +void gb_debugfs_cleanup(void); +struct dentry *gb_debugfs_get(void); + +extern struct bus_type greybus_bus_type; + +extern struct device_type greybus_hd_type; +extern struct device_type greybus_module_type; +extern struct device_type greybus_interface_type; +extern struct device_type greybus_control_type; +extern struct device_type greybus_bundle_type; +extern struct device_type greybus_svc_type; + +static inline int is_gb_host_device(const struct device *dev) +{ + return dev->type == &greybus_hd_type; +} + +static inline int is_gb_module(const struct device *dev) +{ + return dev->type == &greybus_module_type; +} + +static inline int is_gb_interface(const struct device *dev) +{ + return dev->type == &greybus_interface_type; +} + +static inline int is_gb_control(const struct device *dev) +{ + return dev->type == &greybus_control_type; +} + +static inline int is_gb_bundle(const struct device *dev) +{ + return dev->type == &greybus_bundle_type; +} + +static inline int is_gb_svc(const struct device *dev) +{ + return dev->type == &greybus_svc_type; +} + +static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id) +{ + return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports; +} + +#endif /* __KERNEL__ */ +#endif /* __LINUX_GREYBUS_H */ diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h new file mode 100644 index 000000000000..8734d2055657 --- /dev/null +++ b/include/linux/greybus/bundle.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus bundles + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
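+ *
+ * Usage sketch for the runtime PM wrappers declared below (illustrative
+ * only): bundle drivers bracket operation traffic with a get/put pair,
+ * e.g.
+ *
+ *	ret = gb_pm_runtime_get_sync(bundle);
+ *	if (ret)
+ *		return ret;
+ *	... issue operations over the bundle's connections ...
+ *	gb_pm_runtime_put_autosuspend(bundle);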
+ */ + +#ifndef __BUNDLE_H +#define __BUNDLE_H + +#include + +#define BUNDLE_ID_NONE U8_MAX + +/* Greybus "public" definitions" */ +struct gb_bundle { + struct device dev; + struct gb_interface *intf; + + u8 id; + u8 class; + u8 class_major; + u8 class_minor; + + size_t num_cports; + struct greybus_descriptor_cport *cport_desc; + + struct list_head connections; + u8 *state; + + struct list_head links; /* interface->bundles */ +}; +#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev) + +/* Greybus "private" definitions" */ +struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, + u8 class); +int gb_bundle_add(struct gb_bundle *bundle); +void gb_bundle_destroy(struct gb_bundle *bundle); + +/* Bundle Runtime PM wrappers */ +#ifdef CONFIG_PM +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ + int retval; + + retval = pm_runtime_get_sync(&bundle->dev); + if (retval < 0) { + dev_err(&bundle->dev, + "pm_runtime_get_sync failed: %d\n", retval); + pm_runtime_put_noidle(&bundle->dev); + return retval; + } + + return 0; +} + +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ + int retval; + + pm_runtime_mark_last_busy(&bundle->dev); + retval = pm_runtime_put_autosuspend(&bundle->dev); + + return retval; +} + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) +{ + pm_runtime_get_noresume(&bundle->dev); +} + +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) +{ + pm_runtime_put_noidle(&bundle->dev); +} + +#else +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ return 0; } +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ return 0; } + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {} +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {} +#endif + +#endif /* __BUNDLE_H */ diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h new file mode 100644 index 000000000000..5ca3befc0636 --- /dev/null +++ b/include/linux/greybus/connection.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus connections + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
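+ *
+ * A minimal usage sketch (illustrative only; gb_foo_request_handler is
+ * a hypothetical handler for unsolicited incoming requests):
+ *
+ *	connection = gb_connection_create(bundle, cport_id,
+ *					  gb_foo_request_handler);
+ *	if (IS_ERR(connection))
+ *		return PTR_ERR(connection);
+ *
+ *	ret = gb_connection_enable(connection);
+ *	if (ret)
+ *		gb_connection_destroy(connection);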
+ */ + +#ifndef __CONNECTION_H +#define __CONNECTION_H + +#include +#include + +#define GB_CONNECTION_FLAG_CSD BIT(0) +#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) +#define GB_CONNECTION_FLAG_OFFLOADED BIT(2) +#define GB_CONNECTION_FLAG_CDSI1 BIT(3) +#define GB_CONNECTION_FLAG_CONTROL BIT(4) +#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5) + +#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL + +enum gb_connection_state { + GB_CONNECTION_STATE_DISABLED = 0, + GB_CONNECTION_STATE_ENABLED_TX = 1, + GB_CONNECTION_STATE_ENABLED = 2, + GB_CONNECTION_STATE_DISCONNECTING = 3, +}; + +struct gb_operation; + +typedef int (*gb_request_handler_t)(struct gb_operation *); + +struct gb_connection { + struct gb_host_device *hd; + struct gb_interface *intf; + struct gb_bundle *bundle; + struct kref kref; + u16 hd_cport_id; + u16 intf_cport_id; + + struct list_head hd_links; + struct list_head bundle_links; + + gb_request_handler_t handler; + unsigned long flags; + + struct mutex mutex; + spinlock_t lock; + enum gb_connection_state state; + struct list_head operations; + + char name[16]; + struct workqueue_struct *wq; + + atomic_t op_cycle; + + void *private; + + bool mode_switch; +}; + +struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, + u16 hd_cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_control(struct gb_interface *intf); +struct gb_connection *gb_connection_create(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler, + unsigned long flags); +struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle, + u16 cport_id, unsigned long flags); +void gb_connection_destroy(struct gb_connection *connection); + +static inline bool gb_connection_is_static(struct gb_connection *connection) +{ + return !connection->intf; +} + +int gb_connection_enable(struct gb_connection *connection); +int gb_connection_enable_tx(struct gb_connection *connection); +void gb_connection_disable_rx(struct gb_connection *connection); +void gb_connection_disable(struct gb_connection *connection); +void gb_connection_disable_forced(struct gb_connection *connection); + +void gb_connection_mode_switch_prepare(struct gb_connection *connection); +void gb_connection_mode_switch_complete(struct gb_connection *connection); + +void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, + u8 *data, size_t length); + +void gb_connection_latency_tag_enable(struct gb_connection *connection); +void gb_connection_latency_tag_disable(struct gb_connection *connection); + +static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection) +{ + return !(connection->flags & GB_CONNECTION_FLAG_CSD); +} + +static inline bool +gb_connection_flow_control_disabled(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL; +} + +static inline bool gb_connection_is_offloaded(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_OFFLOADED; +} + +static inline bool gb_connection_is_control(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_CONTROL; +} + +static inline void *gb_connection_get_data(struct gb_connection *connection) +{ + return connection->private; +} + +static inline void gb_connection_set_data(struct gb_connection *connection, + void *data) +{ + connection->private = data; +} + +#endif /* 
__CONNECTION_H */ diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h new file mode 100644 index 000000000000..3a29ec05f631 --- /dev/null +++ b/include/linux/greybus/control.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus CPort control protocol + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __CONTROL_H +#define __CONTROL_H + +struct gb_control { + struct device dev; + struct gb_interface *intf; + + struct gb_connection *connection; + + u8 protocol_major; + u8 protocol_minor; + + bool has_bundle_activate; + bool has_bundle_version; + + char *vendor_string; + char *product_string; +}; +#define to_gb_control(d) container_of(d, struct gb_control, dev) + +struct gb_control *gb_control_create(struct gb_interface *intf); +int gb_control_enable(struct gb_control *control); +void gb_control_disable(struct gb_control *control); +int gb_control_suspend(struct gb_control *control); +int gb_control_resume(struct gb_control *control); +int gb_control_add(struct gb_control *control); +void gb_control_del(struct gb_control *control); +struct gb_control *gb_control_get(struct gb_control *control); +void gb_control_put(struct gb_control *control); + +int gb_control_get_bundle_versions(struct gb_control *control); +int gb_control_connected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnecting_operation(struct gb_control *control, + u16 cport_id); +int gb_control_mode_switch_operation(struct gb_control *control); +void gb_control_mode_switch_prepare(struct gb_control *control); +void gb_control_mode_switch_complete(struct gb_control *control); +int gb_control_get_manifest_size_operation(struct gb_interface *intf); +int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, + size_t size); +int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id); +int gb_control_interface_suspend_prepare(struct gb_control *control); +int gb_control_interface_deactivate_prepare(struct gb_control *control); +int gb_control_interface_hibernate_abort(struct gb_control *control); +#endif /* __CONTROL_H */ diff --git a/include/linux/greybus/greybus_id.h b/include/linux/greybus/greybus_id.h new file mode 100644 index 000000000000..f4c8440093e4 --- /dev/null +++ b/include/linux/greybus/greybus_id.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* FIXME + * move this to include/linux/mod_devicetable.h when merging + */ + +#ifndef __LINUX_GREYBUS_ID_H +#define __LINUX_GREYBUS_ID_H + +#include +#include + + +struct greybus_bundle_id { + __u16 match_flags; + __u32 vendor; + __u32 product; + __u8 class; + + kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t)); +}; + +/* Used to match the greybus_bundle_id */ +#define GREYBUS_ID_MATCH_VENDOR BIT(0) +#define GREYBUS_ID_MATCH_PRODUCT BIT(1) +#define GREYBUS_ID_MATCH_CLASS BIT(2) + +#endif /* __LINUX_GREYBUS_ID_H */ diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h new file mode 100644 index 000000000000..db68f7e7d5a7 --- /dev/null +++ b/include/linux/greybus/greybus_manifest.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest definition + * 
+ * See "Greybus Application Protocol" document (version 0.1) for + * details on these values and structures. + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + * + * Released under the GPLv2 and BSD licenses. + */ + +#ifndef __GREYBUS_MANIFEST_H +#define __GREYBUS_MANIFEST_H + +enum greybus_descriptor_type { + GREYBUS_TYPE_INVALID = 0x00, + GREYBUS_TYPE_INTERFACE = 0x01, + GREYBUS_TYPE_STRING = 0x02, + GREYBUS_TYPE_BUNDLE = 0x03, + GREYBUS_TYPE_CPORT = 0x04, +}; + +enum greybus_protocol { + GREYBUS_PROTOCOL_CONTROL = 0x00, + /* 0x01 is unused */ + GREYBUS_PROTOCOL_GPIO = 0x02, + GREYBUS_PROTOCOL_I2C = 0x03, + GREYBUS_PROTOCOL_UART = 0x04, + GREYBUS_PROTOCOL_HID = 0x05, + GREYBUS_PROTOCOL_USB = 0x06, + GREYBUS_PROTOCOL_SDIO = 0x07, + GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08, + GREYBUS_PROTOCOL_PWM = 0x09, + /* 0x0a is unused */ + GREYBUS_PROTOCOL_SPI = 0x0b, + GREYBUS_PROTOCOL_DISPLAY = 0x0c, + GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d, + GREYBUS_PROTOCOL_SENSOR = 0x0e, + GREYBUS_PROTOCOL_LIGHTS = 0x0f, + GREYBUS_PROTOCOL_VIBRATOR = 0x10, + GREYBUS_PROTOCOL_LOOPBACK = 0x11, + GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12, + GREYBUS_PROTOCOL_AUDIO_DATA = 0x13, + GREYBUS_PROTOCOL_SVC = 0x14, + GREYBUS_PROTOCOL_BOOTROM = 0x15, + GREYBUS_PROTOCOL_CAMERA_DATA = 0x16, + GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17, + GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18, + GREYBUS_PROTOCOL_AUTHENTICATION = 0x19, + GREYBUS_PROTOCOL_LOG = 0x1a, + /* ... */ + GREYBUS_PROTOCOL_RAW = 0xfe, + GREYBUS_PROTOCOL_VENDOR = 0xff, +}; + +enum greybus_class_type { + GREYBUS_CLASS_CONTROL = 0x00, + /* 0x01 is unused */ + /* 0x02 is unused */ + /* 0x03 is unused */ + /* 0x04 is unused */ + GREYBUS_CLASS_HID = 0x05, + /* 0x06 is unused */ + /* 0x07 is unused */ + GREYBUS_CLASS_POWER_SUPPLY = 0x08, + /* 0x09 is unused */ + GREYBUS_CLASS_BRIDGED_PHY = 0x0a, + /* 0x0b is unused */ + GREYBUS_CLASS_DISPLAY = 0x0c, + GREYBUS_CLASS_CAMERA = 0x0d, + GREYBUS_CLASS_SENSOR = 0x0e, + GREYBUS_CLASS_LIGHTS = 0x0f, + GREYBUS_CLASS_VIBRATOR = 0x10, + GREYBUS_CLASS_LOOPBACK = 0x11, + GREYBUS_CLASS_AUDIO = 0x12, + /* 0x13 is unused */ + /* 0x14 is unused */ + GREYBUS_CLASS_BOOTROM = 0x15, + GREYBUS_CLASS_FW_MANAGEMENT = 0x16, + GREYBUS_CLASS_LOG = 0x17, + /* ... */ + GREYBUS_CLASS_RAW = 0xfe, + GREYBUS_CLASS_VENDOR = 0xff, +}; + +enum { + GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0), +}; + +/* + * The string in a string descriptor is not NUL-terminated. The + * size of the descriptor will be rounded up to a multiple of 4 + * bytes, by padding the string with 0x00 bytes if necessary. + */ +struct greybus_descriptor_string { + __u8 length; + __u8 id; + __u8 string[0]; +} __packed; + +/* + * An interface descriptor describes information about an interface as a whole, + * *not* the functions within it. + */ +struct greybus_descriptor_interface { + __u8 vendor_stringid; + __u8 product_stringid; + __u8 features; + __u8 pad; +} __packed; + +/* + * An bundle descriptor defines an identification number and a class for + * each bundle. + * + * @id: Uniquely identifies a bundle within a interface, its sole purpose is to + * allow CPort descriptors to specify which bundle they are associated with. + * The first bundle will have id 0, second will have 1 and so on. + * + * The largest CPort id associated with an bundle (defined by a + * CPort descriptor in the manifest) is used to determine how to + * encode the device id and module number in UniPro packets + * that use the bundle. 
+ * + * @class: It is used by kernel to know the functionality provided by the + * bundle and will be matched against drivers functinality while probing greybus + * driver. It should contain one of the values defined in + * 'enum greybus_class_type'. + * + */ +struct greybus_descriptor_bundle { + __u8 id; /* interface-relative id (0..) */ + __u8 class; + __u8 pad[2]; +} __packed; + +/* + * A CPort descriptor indicates the id of the bundle within the + * module it's associated with, along with the CPort id used to + * address the CPort. The protocol id defines the format of messages + * exchanged using the CPort. + */ +struct greybus_descriptor_cport { + __le16 id; + __u8 bundle; + __u8 protocol_id; /* enum greybus_protocol */ +} __packed; + +struct greybus_descriptor_header { + __le16 size; + __u8 type; /* enum greybus_descriptor_type */ + __u8 pad; +} __packed; + +struct greybus_descriptor { + struct greybus_descriptor_header header; + union { + struct greybus_descriptor_string string; + struct greybus_descriptor_interface interface; + struct greybus_descriptor_bundle bundle; + struct greybus_descriptor_cport cport; + }; +} __packed; + +struct greybus_manifest_header { + __le16 size; + __u8 version_major; + __u8 version_minor; +} __packed; + +struct greybus_manifest { + struct greybus_manifest_header header; + struct greybus_descriptor descriptors[0]; +} __packed; + +#endif /* __GREYBUS_MANIFEST_H */ diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h new file mode 100644 index 000000000000..5f34d1effb59 --- /dev/null +++ b/include/linux/greybus/greybus_protocols.h @@ -0,0 +1,2176 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * Copyright(c) 2014 - 2015 Google Inc. All rights reserved. + * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved. + */ + +#ifndef __GREYBUS_PROTOCOLS_H +#define __GREYBUS_PROTOCOLS_H + +/* Fixed IDs for control/svc protocols */ + +/* SVC switch-port device ids */ +#define GB_SVC_DEVICE_ID_SVC 0 +#define GB_SVC_DEVICE_ID_AP 1 +#define GB_SVC_DEVICE_ID_MIN 2 +#define GB_SVC_DEVICE_ID_MAX 31 + +#define GB_SVC_CPORT_ID 0 +#define GB_CONTROL_BUNDLE_ID 0 +#define GB_CONTROL_CPORT_ID 0 + + +/* + * All operation messages (both requests and responses) begin with + * a header that encodes the size of the message (header included). + * This header also contains a unique identifier, that associates a + * response message with its operation. The header contains an + * operation type field, whose interpretation is dependent on what + * type of protocol is used over the connection. The high bit + * (0x80) of the operation type field is used to indicate whether + * the message is a request (clear) or a response (set). + * + * Response messages include an additional result byte, which + * communicates the result of the corresponding request. A zero + * result value means the operation completed successfully. Any + * other value indicates an error; in this case, the payload of the + * response message (if any) is ignored. The result byte must be + * zero in the header for a request message. + * + * The wire format for all numeric fields in the header is little + * endian. Any operation-specific data begins immediately after the + * header. 
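+ *
+ * As a worked example (illustrative only), a request carrying a 4-byte
+ * payload forms a 12-byte message whose header is encoded as:
+ *
+ *	struct gb_operation_msg_hdr hdr = {
+ *		.size		= cpu_to_le16(sizeof(hdr) + 4),
+ *		.operation_id	= cpu_to_le16(0x0001),
+ *		.type		= 0x02,
+ *		.result		= 0,
+ *	};
+ *
+ * The matching response repeats operation_id and carries type 0x82
+ * (0x80 | 0x02) plus a meaningful result byte.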
+ */ +struct gb_operation_msg_hdr { + __le16 size; /* Size in bytes of header + payload */ + __le16 operation_id; /* Operation unique id */ + __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */ + __u8 result; /* Result of request (in responses only) */ + __u8 pad[2]; /* must be zero (ignore when read) */ +} __packed; + + +/* Generic request types */ +#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00 +#define GB_REQUEST_TYPE_INVALID 0x7f + +struct gb_cport_shutdown_request { + __u8 phase; +} __packed; + + +/* Control Protocol */ + +/* Greybus control request types */ +#define GB_CONTROL_TYPE_VERSION 0x01 +#define GB_CONTROL_TYPE_PROBE_AP 0x02 +#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03 +#define GB_CONTROL_TYPE_GET_MANIFEST 0x04 +#define GB_CONTROL_TYPE_CONNECTED 0x05 +#define GB_CONTROL_TYPE_DISCONNECTED 0x06 +#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07 +#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08 +#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09 +/* Unused 0x0a */ +#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b +#define GB_CONTROL_TYPE_DISCONNECTING 0x0c +#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d +#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e +#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f +#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10 +#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11 +#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12 +#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13 +#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14 +#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15 + +struct gb_control_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_version_response { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_bundle_version_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Control protocol manifest get size request has no payload*/ +struct gb_control_get_manifest_size_response { + __le16 size; +} __packed; + +/* Control protocol manifest get request has no payload */ +struct gb_control_get_manifest_response { + __u8 data[0]; +} __packed; + +/* Control protocol [dis]connected request */ +struct gb_control_connected_request { + __le16 cport_id; +} __packed; + +struct gb_control_disconnecting_request { + __le16 cport_id; +} __packed; +/* disconnecting response has no payload */ + +struct gb_control_disconnected_request { + __le16 cport_id; +} __packed; +/* Control protocol [dis]connected response has no payload */ + +/* + * All Bundle power management operations use the same request and response + * layout and status codes. + */ + +#define GB_CONTROL_BUNDLE_PM_OK 0x00 +#define GB_CONTROL_BUNDLE_PM_INVAL 0x01 +#define GB_CONTROL_BUNDLE_PM_BUSY 0x02 +#define GB_CONTROL_BUNDLE_PM_FAIL 0x03 +#define GB_CONTROL_BUNDLE_PM_NA 0x04 + +struct gb_control_bundle_pm_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_pm_response { + __u8 status; +} __packed; + +/* + * Interface Suspend Prepare and Deactivate Prepare operations use the same + * response layout and error codes. Define a single response structure and reuse + * it. Both operations have no payload. 
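+ *
+ * Caller-side sketch (illustrative only, loosely mirroring how the
+ * control driver maps these codes to errnos): even when the operation
+ * itself succeeds, the status byte must still be checked, e.g.
+ *
+ *	if (response.status != GB_CONTROL_INTF_PM_OK)
+ *		return response.status == GB_CONTROL_INTF_PM_BUSY ?
+ *			-EBUSY : -ENOMSG;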
+ */ + +#define GB_CONTROL_INTF_PM_OK 0x00 +#define GB_CONTROL_INTF_PM_BUSY 0x01 +#define GB_CONTROL_INTF_PM_NA 0x02 + +struct gb_control_intf_pm_response { + __u8 status; +} __packed; + +/* APBridge protocol */ + +/* request APB1 log */ +#define GB_APB_REQUEST_LOG 0x02 + +/* request to map a cport to bulk in and bulk out endpoints */ +#define GB_APB_REQUEST_EP_MAPPING 0x03 + +/* request to get the number of cports available */ +#define GB_APB_REQUEST_CPORT_COUNT 0x04 + +/* request to reset a cport state */ +#define GB_APB_REQUEST_RESET_CPORT 0x05 + +/* request to time the latency of messages on a given cport */ +#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06 +#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07 + +/* request to control the CSI transmitter */ +#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08 + +/* request to control audio streaming */ +#define GB_APB_REQUEST_AUDIO_CONTROL 0x09 + +/* TimeSync requests */ +#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d +#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e +#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10 + +/* requests to set Greybus CPort flags */ +#define GB_APB_REQUEST_CPORT_FLAGS 0x11 + +/* ARPC request */ +#define GB_APB_REQUEST_ARPC_RUN 0x12 + +struct gb_apb_request_cport_flags { + __le32 flags; +#define GB_APB_CPORT_FLAG_CONTROL 0x01 +#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02 +} __packed; + + +/* Firmware Download Protocol */ + +/* Request Types */ +#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01 +#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02 +#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03 + +#define GB_FIRMWARE_TAG_MAX_SIZE 10 + +/* firmware download find firmware request/response */ +struct gb_fw_download_find_firmware_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_download_find_firmware_response { + __u8 firmware_id; + __le32 size; +} __packed; + +/* firmware download fetch firmware request/response */ +struct gb_fw_download_fetch_firmware_request { + __u8 firmware_id; + __le32 offset; + __le32 size; +} __packed; + +struct gb_fw_download_fetch_firmware_response { + __u8 data[0]; +} __packed; + +/* firmware download release firmware request */ +struct gb_fw_download_release_firmware_request { + __u8 firmware_id; +} __packed; +/* firmware download release firmware response has no payload */ + + +/* Firmware Management Protocol */ + +/* Request Types */ +#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01 +#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02 +#define GB_FW_MGMT_TYPE_LOADED_FW 0x03 +#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06 + +#define GB_FW_LOAD_METHOD_UNIPRO 0x01 +#define GB_FW_LOAD_METHOD_INTERNAL 0x02 + +#define GB_FW_LOAD_STATUS_FAILED 0x00 +#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01 +#define GB_FW_LOAD_STATUS_VALIDATED 0x02 +#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03 + +#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03 +#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04 +#define GB_FW_BACKEND_FW_STATUS_INT 0x05 +#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06 +#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07 + +#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03 +#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04 +#define 
GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05 + +/* firmware management interface firmware version request has no payload */ +struct gb_fw_mgmt_interface_fw_version_response { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; + __le16 major; + __le16 minor; +} __packed; + +/* firmware management load and validate firmware request/response */ +struct gb_fw_mgmt_load_and_validate_fw_request { + __u8 request_id; + __u8 load_method; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management load and validate firmware response has no payload*/ + +/* firmware management loaded firmware request */ +struct gb_fw_mgmt_loaded_fw_request { + __u8 request_id; + __u8 status; + __le16 major; + __le16 minor; +} __packed; +/* firmware management loaded firmware response has no payload */ + +/* firmware management backend firmware version request/response */ +struct gb_fw_mgmt_backend_fw_version_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_mgmt_backend_fw_version_response { + __le16 major; + __le16 minor; + __u8 status; +} __packed; + +/* firmware management backend firmware update request */ +struct gb_fw_mgmt_backend_fw_update_request { + __u8 request_id; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management backend firmware update response has no payload */ + +/* firmware management backend firmware updated request */ +struct gb_fw_mgmt_backend_fw_updated_request { + __u8 request_id; + __u8 status; +} __packed; +/* firmware management backend firmware updated response has no payload */ + + +/* Component Authentication Protocol (CAP) */ + +/* Request Types */ +#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01 +#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02 +#define GB_CAP_TYPE_AUTHENTICATE 0x03 + +/* CAP get endpoint uid request has no payload */ +struct gb_cap_get_endpoint_uid_response { + __u8 uid[8]; +} __packed; + +/* CAP get endpoint ims certificate request/response */ +struct gb_cap_get_ims_certificate_request { + __le32 certificate_class; + __le32 certificate_id; +} __packed; + +struct gb_cap_get_ims_certificate_response { + __u8 result_code; + __u8 certificate[0]; +} __packed; + +/* CAP authenticate request/response */ +struct gb_cap_authenticate_request { + __le32 auth_type; + __u8 uid[8]; + __u8 challenge[32]; +} __packed; + +struct gb_cap_authenticate_response { + __u8 result_code; + __u8 response[64]; + __u8 signature[0]; +} __packed; + + +/* Bootrom Protocol */ + +/* Version of the Greybus bootrom protocol we support */ +#define GB_BOOTROM_VERSION_MAJOR 0x00 +#define GB_BOOTROM_VERSION_MINOR 0x01 + +/* Greybus bootrom request types */ +#define GB_BOOTROM_TYPE_VERSION 0x01 +#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02 +#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03 +#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04 +#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */ +#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */ + +/* Greybus bootrom boot stages */ +#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */ + +/* Greybus bootrom ready to boot status */ +#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */ +#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */ +#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob 
is valid and secure */ + +/* Max bootrom data fetch size in bytes */ +#define GB_BOOTROM_FETCH_MAX 2000 + +struct gb_bootrom_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_bootrom_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Bootrom protocol firmware size request/response */ +struct gb_bootrom_firmware_size_request { + __u8 stage; +} __packed; + +struct gb_bootrom_firmware_size_response { + __le32 size; +} __packed; + +/* Bootrom protocol get firmware request/response */ +struct gb_bootrom_get_firmware_request { + __le32 offset; + __le32 size; +} __packed; + +struct gb_bootrom_get_firmware_response { + __u8 data[0]; +} __packed; + +/* Bootrom protocol Ready to boot request */ +struct gb_bootrom_ready_to_boot_request { + __u8 status; +} __packed; +/* Bootrom protocol Ready to boot response has no payload */ + +/* Bootrom protocol get VID/PID request has no payload */ +struct gb_bootrom_get_vid_pid_response { + __le32 vendor_id; + __le32 product_id; +} __packed; + + +/* Power Supply */ + +/* Greybus power supply request types */ +#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02 +#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03 +#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04 +#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05 +#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06 +#define GB_POWER_SUPPLY_TYPE_EVENT 0x07 + +/* Greybus power supply battery technologies types */ +#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_TECH_NiMH 0x0001 +#define GB_POWER_SUPPLY_TECH_LION 0x0002 +#define GB_POWER_SUPPLY_TECH_LIPO 0x0003 +#define GB_POWER_SUPPLY_TECH_LiFe 0x0004 +#define GB_POWER_SUPPLY_TECH_NiCd 0x0005 +#define GB_POWER_SUPPLY_TECH_LiMn 0x0006 + +/* Greybus power supply types */ +#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000 +#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001 +#define GB_POWER_SUPPLY_UPS_TYPE 0x0002 +#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003 +#define GB_POWER_SUPPLY_USB_TYPE 0x0004 +#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005 +#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006 +#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007 + +/* Greybus power supply health values */ +#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001 +#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002 +#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003 +#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004 +#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005 +#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006 +#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007 +#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008 + +/* Greybus power supply status values */ +#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001 +#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002 +#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003 +#define GB_POWER_SUPPLY_STATUS_FULL 0x0004 + +/* Greybus power supply capacity level values */ +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005 + +/* Greybus power supply scope values */ +#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001 +#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002 + +struct gb_power_supply_get_supplies_response { + __u8 supplies_count; +} __packed; + +struct 
gb_power_supply_get_description_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_description_response { + __u8 manufacturer[32]; + __u8 model[32]; + __u8 serial_number[32]; + __le16 type; + __u8 properties_count; +} __packed; + +struct gb_power_supply_props_desc { + __u8 property; +#define GB_POWER_SUPPLY_PROP_STATUS 0x00 +#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01 +#define GB_POWER_SUPPLY_PROP_HEALTH 0x02 +#define GB_POWER_SUPPLY_PROP_PRESENT 0x03 +#define GB_POWER_SUPPLY_PROP_ONLINE 0x04 +#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05 +#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06 +#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B +#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C +#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D +#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E +#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F +#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10 +#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11 +#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12 +#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13 +#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14 +#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19 +#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A +#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B +#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22 +#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27 +#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28 +#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29 +#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C +#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D +#define GB_POWER_SUPPLY_PROP_TEMP 0x2E +#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F +#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39 +#define GB_POWER_SUPPLY_PROP_TYPE 0x3A +#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B +#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C +#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D + __u8 is_writeable; +} __packed; + +struct gb_power_supply_get_property_descriptors_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_property_descriptors_response { + 
__u8 properties_count; + struct gb_power_supply_props_desc props[]; +} __packed; + +struct gb_power_supply_get_property_request { + __u8 psy_id; + __u8 property; +} __packed; + +struct gb_power_supply_get_property_response { + __le32 prop_val; +}; + +struct gb_power_supply_set_property_request { + __u8 psy_id; + __u8 property; + __le32 prop_val; +} __packed; + +struct gb_power_supply_event_request { + __u8 psy_id; + __u8 event; +#define GB_POWER_SUPPLY_UPDATE 0x01 +} __packed; + + +/* HID */ + +/* Greybus HID operation types */ +#define GB_HID_TYPE_GET_DESC 0x02 +#define GB_HID_TYPE_GET_REPORT_DESC 0x03 +#define GB_HID_TYPE_PWR_ON 0x04 +#define GB_HID_TYPE_PWR_OFF 0x05 +#define GB_HID_TYPE_GET_REPORT 0x06 +#define GB_HID_TYPE_SET_REPORT 0x07 +#define GB_HID_TYPE_IRQ_EVENT 0x08 + +/* Report type */ +#define GB_HID_INPUT_REPORT 0 +#define GB_HID_OUTPUT_REPORT 1 +#define GB_HID_FEATURE_REPORT 2 + +/* Different request/response structures */ +/* HID get descriptor response */ +struct gb_hid_desc_response { + __u8 bLength; + __le16 wReportDescLength; + __le16 bcdHID; + __le16 wProductID; + __le16 wVendorID; + __u8 bCountryCode; +} __packed; + +/* HID get report request/response */ +struct gb_hid_get_report_request { + __u8 report_type; + __u8 report_id; +} __packed; + +/* HID set report request */ +struct gb_hid_set_report_request { + __u8 report_type; + __u8 report_id; + __u8 report[0]; +} __packed; + +/* HID input report request, via interrupt pipe */ +struct gb_hid_input_report_request { + __u8 report[0]; +} __packed; + + +/* I2C */ + +/* Greybus i2c request types */ +#define GB_I2C_TYPE_FUNCTIONALITY 0x02 +#define GB_I2C_TYPE_TRANSFER 0x05 + +/* functionality request has no payload */ +struct gb_i2c_functionality_response { + __le32 functionality; +} __packed; + +/* + * Outgoing data immediately follows the op count and ops array. + * The data for each write (master -> slave) op in the array is sent + * in order, with no (e.g. pad) bytes separating them. + * + * Short reads cause the entire transfer request to fail So response + * payload consists only of bytes read, and the number of bytes is + * exactly what was specified in the corresponding op. Like + * outgoing data, the incoming data is in order and contiguous. 
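+ *
+ * Sketch of a typical register read (illustrative only; flags follow
+ * the i2c_msg convention from linux/i2c.h), encoded as a one-byte
+ * write followed by a two-byte read at the same address:
+ *
+ *	ops[0] = (struct gb_i2c_transfer_op){
+ *		.addr = cpu_to_le16(0x50), .flags = 0,
+ *		.size = cpu_to_le16(1) };
+ *	ops[1] = (struct gb_i2c_transfer_op){
+ *		.addr = cpu_to_le16(0x50), .flags = cpu_to_le16(I2C_M_RD),
+ *		.size = cpu_to_le16(2) };
+ *
+ * The single written byte follows the ops array in the request; the
+ * response payload is exactly the two bytes read.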
+ */ +struct gb_i2c_transfer_op { + __le16 addr; + __le16 flags; + __le16 size; +} __packed; + +struct gb_i2c_transfer_request { + __le16 op_count; + struct gb_i2c_transfer_op ops[0]; /* op_count of these */ +} __packed; +struct gb_i2c_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + + +/* GPIO */ + +/* Greybus GPIO request types */ +#define GB_GPIO_TYPE_LINE_COUNT 0x02 +#define GB_GPIO_TYPE_ACTIVATE 0x03 +#define GB_GPIO_TYPE_DEACTIVATE 0x04 +#define GB_GPIO_TYPE_GET_DIRECTION 0x05 +#define GB_GPIO_TYPE_DIRECTION_IN 0x06 +#define GB_GPIO_TYPE_DIRECTION_OUT 0x07 +#define GB_GPIO_TYPE_GET_VALUE 0x08 +#define GB_GPIO_TYPE_SET_VALUE 0x09 +#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a +#define GB_GPIO_TYPE_IRQ_TYPE 0x0b +#define GB_GPIO_TYPE_IRQ_MASK 0x0c +#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d +#define GB_GPIO_TYPE_IRQ_EVENT 0x0e + +#define GB_GPIO_IRQ_TYPE_NONE 0x00 +#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01 +#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02 +#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03 +#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04 +#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08 + +/* line count request has no payload */ +struct gb_gpio_line_count_response { + __u8 count; +} __packed; + +struct gb_gpio_activate_request { + __u8 which; +} __packed; +/* activate response has no payload */ + +struct gb_gpio_deactivate_request { + __u8 which; +} __packed; +/* deactivate response has no payload */ + +struct gb_gpio_get_direction_request { + __u8 which; +} __packed; +struct gb_gpio_get_direction_response { + __u8 direction; +} __packed; + +struct gb_gpio_direction_in_request { + __u8 which; +} __packed; +/* direction in response has no payload */ + +struct gb_gpio_direction_out_request { + __u8 which; + __u8 value; +} __packed; +/* direction out response has no payload */ + +struct gb_gpio_get_value_request { + __u8 which; +} __packed; +struct gb_gpio_get_value_response { + __u8 value; +} __packed; + +struct gb_gpio_set_value_request { + __u8 which; + __u8 value; +} __packed; +/* set value response has no payload */ + +struct gb_gpio_set_debounce_request { + __u8 which; + __le16 usec; +} __packed; +/* debounce response has no payload */ + +struct gb_gpio_irq_type_request { + __u8 which; + __u8 type; +} __packed; +/* irq type response has no payload */ + +struct gb_gpio_irq_mask_request { + __u8 which; +} __packed; +/* irq mask response has no payload */ + +struct gb_gpio_irq_unmask_request { + __u8 which; +} __packed; +/* irq unmask response has no payload */ + +/* irq event requests originate on another module and are handled on the AP */ +struct gb_gpio_irq_event_request { + __u8 which; +} __packed; +/* irq event has no response */ + + +/* PWM */ + +/* Greybus PWM operation types */ +#define GB_PWM_TYPE_PWM_COUNT 0x02 +#define GB_PWM_TYPE_ACTIVATE 0x03 +#define GB_PWM_TYPE_DEACTIVATE 0x04 +#define GB_PWM_TYPE_CONFIG 0x05 +#define GB_PWM_TYPE_POLARITY 0x06 +#define GB_PWM_TYPE_ENABLE 0x07 +#define GB_PWM_TYPE_DISABLE 0x08 + +/* pwm count request has no payload */ +struct gb_pwm_count_response { + __u8 count; +} __packed; + +struct gb_pwm_activate_request { + __u8 which; +} __packed; + +struct gb_pwm_deactivate_request { + __u8 which; +} __packed; + +struct gb_pwm_config_request { + __u8 which; + __le32 duty; + __le32 period; +} __packed; + +struct gb_pwm_polarity_request { + __u8 which; + __u8 polarity; +} __packed; + +struct gb_pwm_enable_request { + __u8 which; +} __packed; + +struct gb_pwm_disable_request { + __u8 which; +} __packed; + +/* SPI */ + +/* Should match up with modes in 
linux/spi/spi.h */ +#define GB_SPI_MODE_CPHA 0x01 /* clock phase */ +#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */ +#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */ +#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0) +#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */ +#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */ +#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */ +#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */ + +/* Should match up with flags in linux/spi/spi.h */ +#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */ +#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */ + +/* Greybus spi operation types */ +#define GB_SPI_TYPE_MASTER_CONFIG 0x02 +#define GB_SPI_TYPE_DEVICE_CONFIG 0x03 +#define GB_SPI_TYPE_TRANSFER 0x04 + +/* mode request has no payload */ +struct gb_spi_master_config_response { + __le32 bits_per_word_mask; + __le32 min_speed_hz; + __le32 max_speed_hz; + __le16 mode; + __le16 flags; + __u8 num_chipselect; +} __packed; + +struct gb_spi_device_config_request { + __u8 chip_select; +} __packed; + +struct gb_spi_device_config_response { + __le16 mode; + __u8 bits_per_word; + __le32 max_speed_hz; + __u8 device_type; +#define GB_SPI_SPI_DEV 0x00 +#define GB_SPI_SPI_NOR 0x01 +#define GB_SPI_SPI_MODALIAS 0x02 + __u8 name[32]; +} __packed; + +/** + * struct gb_spi_transfer - a read/write buffer pair + * @speed_hz: Select a speed other than the device default for this transfer. If + * 0 the default (from @spi_device) is used. + * @len: size of rx and tx buffers (in bytes) + * @delay_usecs: microseconds to delay after this transfer before (optionally) + * changing the chipselect status, then starting the next transfer or + * completing this spi_message. + * @cs_change: affects chipselect after this transfer completes + * @bits_per_word: select a bits_per_word other than the device default for this + * transfer. If 0 the default (from @spi_device) is used. 
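+ *
+ * Example (illustrative only): a 4-byte write at the device's default
+ * speed and word size would be described as
+ *
+ *	struct gb_spi_transfer xfer = {
+ *		.speed_hz	= 0,
+ *		.len		= cpu_to_le32(4),
+ *		.bits_per_word	= 0,
+ *		.xfer_flags	= GB_SPI_XFER_WRITE,
+ *	};
+ *
+ * with the four data bytes appended after the transfers[] array of the
+ * enclosing gb_spi_transfer_request.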
+ */ +struct gb_spi_transfer { + __le32 speed_hz; + __le32 len; + __le16 delay_usecs; + __u8 cs_change; + __u8 bits_per_word; + __u8 xfer_flags; +#define GB_SPI_XFER_READ 0x01 +#define GB_SPI_XFER_WRITE 0x02 +#define GB_SPI_XFER_INPROGRESS 0x04 +} __packed; + +struct gb_spi_transfer_request { + __u8 chip_select; /* of the spi device */ + __u8 mode; /* of the spi device */ + __le16 count; + struct gb_spi_transfer transfers[0]; /* count of these */ +} __packed; + +struct gb_spi_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + +/* Version of the Greybus SVC protocol we support */ +#define GB_SVC_VERSION_MAJOR 0x00 +#define GB_SVC_VERSION_MINOR 0x01 + +/* Greybus SVC request types */ +#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01 +#define GB_SVC_TYPE_SVC_HELLO 0x02 +#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03 +#define GB_SVC_TYPE_INTF_RESET 0x06 +#define GB_SVC_TYPE_CONN_CREATE 0x07 +#define GB_SVC_TYPE_CONN_DESTROY 0x08 +#define GB_SVC_TYPE_DME_PEER_GET 0x09 +#define GB_SVC_TYPE_DME_PEER_SET 0x0a +#define GB_SVC_TYPE_ROUTE_CREATE 0x0b +#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c +#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d +#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e +#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_SVC_TYPE_INTF_SET_PWRM 0x10 +#define GB_SVC_TYPE_INTF_EJECT 0x11 +#define GB_SVC_TYPE_PING 0x13 +#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14 +#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15 +#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16 +#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19 +#define GB_SVC_TYPE_TIMESYNC_PING 0x1a +#define GB_SVC_TYPE_MODULE_INSERTED 0x1f +#define GB_SVC_TYPE_MODULE_REMOVED 0x20 +#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21 +#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22 +#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23 +#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24 +#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25 +#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26 +#define GB_SVC_TYPE_INTF_ACTIVATE 0x27 +#define GB_SVC_TYPE_INTF_RESUME 0x28 +#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29 +#define GB_SVC_TYPE_INTF_OOPS 0x2a + +/* Greybus SVC protocol status values */ +#define GB_SVC_OP_SUCCESS 0x00 +#define GB_SVC_OP_UNKNOWN_ERROR 0x01 +#define GB_SVC_INTF_NOT_DETECTED 0x02 +#define GB_SVC_INTF_NO_UPRO_LINK 0x03 +#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04 +#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05 +#define GB_SVC_INTF_NO_V_SYS 0x06 +#define GB_SVC_INTF_V_CHG 0x07 +#define GB_SVC_INTF_WAKE_BUSY 0x08 +#define GB_SVC_INTF_NO_REFCLK 0x09 +#define GB_SVC_INTF_RELEASING 0x0a +#define GB_SVC_INTF_NO_ORDER 0x0b +#define GB_SVC_INTF_MBOX_SET 0x0c +#define GB_SVC_INTF_BAD_MBOX 0x0d +#define GB_SVC_INTF_OP_TIMEOUT 0x0e +#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f + +struct gb_svc_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_svc_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* SVC protocol hello request */ +struct gb_svc_hello_request { + __le16 endo_id; + __u8 interface_id; +} __packed; +/* hello response has no payload */ + +struct gb_svc_intf_device_id_request { + __u8 intf_id; + __u8 device_id; +} __packed; +/* device id response has no payload */ + +struct gb_svc_intf_reset_request { + __u8 intf_id; +} __packed; +/* interface reset response has no payload */ + +struct gb_svc_intf_eject_request { + __u8 intf_id; +} __packed; +/* interface eject response has no payload */ + +struct gb_svc_conn_create_request { + __u8 intf1_id; + 
__le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; + __u8 tc; + __u8 flags; +} __packed; +/* connection create response has no payload */ + +struct gb_svc_conn_destroy_request { + __u8 intf1_id; + __le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; +} __packed; +/* connection destroy response has no payload */ + +struct gb_svc_dme_peer_get_request { + __u8 intf_id; + __le16 attr; + __le16 selector; +} __packed; + +struct gb_svc_dme_peer_get_response { + __le16 result_code; + __le32 attr_value; +} __packed; + +struct gb_svc_dme_peer_set_request { + __u8 intf_id; + __le16 attr; + __le16 selector; + __le32 value; +} __packed; + +struct gb_svc_dme_peer_set_response { + __le16 result_code; +} __packed; + +/* Greybus init-status values, currently retrieved using DME peer gets. */ +#define GB_INIT_SPI_BOOT_STARTED 0x02 +#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03 +#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04 +#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06 +#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09 +#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D + +struct gb_svc_route_create_request { + __u8 intf1_id; + __u8 dev1_id; + __u8 intf2_id; + __u8 dev2_id; +} __packed; +/* route create response has no payload */ + +struct gb_svc_route_destroy_request { + __u8 intf1_id; + __u8 intf2_id; +} __packed; +/* route destroy response has no payload */ + +/* used for svc_intf_vsys_{enable,disable} */ +struct gb_svc_intf_vsys_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_vsys_response { + __u8 result_code; +#define GB_SVC_INTF_VSYS_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_VSYS_FAIL 0x02 +} __packed; + +/* used for svc_intf_refclk_{enable,disable} */ +struct gb_svc_intf_refclk_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_refclk_response { + __u8 result_code; +#define GB_SVC_INTF_REFCLK_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_REFCLK_FAIL 0x02 +} __packed; + +/* used for svc_intf_unipro_{enable,disable} */ +struct gb_svc_intf_unipro_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_unipro_response { + __u8 result_code; +#define GB_SVC_INTF_UNIPRO_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_UNIPRO_FAIL 0x02 +#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03 +} __packed; + +#define GB_SVC_UNIPRO_FAST_MODE 0x01 +#define GB_SVC_UNIPRO_SLOW_MODE 0x02 +#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04 +#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05 +#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07 +#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11 +#define GB_SVC_UNIPRO_OFF_MODE 0x12 + +#define GB_SVC_SMALL_AMPLITUDE 0x01 +#define GB_SVC_LARGE_AMPLITUDE 0x02 + +#define GB_SVC_NO_DE_EMPHASIS 0x00 +#define GB_SVC_SMALL_DE_EMPHASIS 0x01 +#define GB_SVC_LARGE_DE_EMPHASIS 0x02 + +#define GB_SVC_PWRM_RXTERMINATION 0x01 +#define GB_SVC_PWRM_TXTERMINATION 0x02 +#define GB_SVC_PWRM_LINE_RESET 0x04 +#define GB_SVC_PWRM_SCRAMBLING 0x20 + +#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001 + +#define GB_SVC_UNIPRO_HS_SERIES_A 0x01 +#define GB_SVC_UNIPRO_HS_SERIES_B 0x02 + +#define GB_SVC_SETPWRM_PWR_OK 0x00 +#define GB_SVC_SETPWRM_PWR_LOCAL 0x01 +#define GB_SVC_SETPWRM_PWR_REMOTE 0x02 +#define GB_SVC_SETPWRM_PWR_BUSY 0x03 +#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04 +#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05 + +struct gb_svc_l2_timer_cfg { + __le16 tsb_fc0_protection_timeout; + __le16 tsb_tc0_replay_timeout; + __le16 tsb_afc0_req_timeout; + __le16 tsb_fc1_protection_timeout; + __le16 tsb_tc1_replay_timeout; + __le16 tsb_afc1_req_timeout; + __le16 reserved_for_tc2[3]; + 
__le16 reserved_for_tc3[3]; +} __packed; + +struct gb_svc_intf_set_pwrm_request { + __u8 intf_id; + __u8 hs_series; + __u8 tx_mode; + __u8 tx_gear; + __u8 tx_nlanes; + __u8 tx_amplitude; + __u8 tx_hs_equalizer; + __u8 rx_mode; + __u8 rx_gear; + __u8 rx_nlanes; + __u8 flags; + __le32 quirks; + struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata; +} __packed; + +struct gb_svc_intf_set_pwrm_response { + __u8 result_code; +} __packed; + +struct gb_svc_key_event_request { + __le16 key_code; +#define GB_KEYCODE_ARA 0x00 + + __u8 key_event; +#define GB_SVC_KEY_RELEASED 0x00 +#define GB_SVC_KEY_PRESSED 0x01 +} __packed; + +#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254 + +struct gb_svc_pwrmon_rail_count_get_response { + __u8 rail_count; +} __packed; + +#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32 + +struct gb_svc_pwrmon_rail_names_get_response { + __u8 status; + __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; +} __packed; + +#define GB_SVC_PWRMON_TYPE_CURR 0x01 +#define GB_SVC_PWRMON_TYPE_VOL 0x02 +#define GB_SVC_PWRMON_TYPE_PWR 0x03 + +#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00 +#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01 +#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02 +#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03 + +struct gb_svc_pwrmon_sample_get_request { + __u8 rail_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_request { + __u8 intf_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001 + +struct gb_svc_module_inserted_request { + __u8 primary_intf_id; + __u8 intf_count; + __le16 flags; +} __packed; +/* module_inserted response has no payload */ + +struct gb_svc_module_removed_request { + __u8 primary_intf_id; +} __packed; +/* module_removed response has no payload */ + +struct gb_svc_intf_activate_request { + __u8 intf_id; +} __packed; + +#define GB_SVC_INTF_TYPE_UNKNOWN 0x00 +#define GB_SVC_INTF_TYPE_DUMMY 0x01 +#define GB_SVC_INTF_TYPE_UNIPRO 0x02 +#define GB_SVC_INTF_TYPE_GREYBUS 0x03 + +struct gb_svc_intf_activate_response { + __u8 status; + __u8 intf_type; +} __packed; + +struct gb_svc_intf_resume_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_resume_response { + __u8 status; +} __packed; + +#define GB_SVC_INTF_MAILBOX_NONE 0x00 +#define GB_SVC_INTF_MAILBOX_AP 0x01 +#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02 + +struct gb_svc_intf_mailbox_event_request { + __u8 intf_id; + __le16 result_code; + __le32 mailbox; +} __packed; +/* intf_mailbox_event response has no payload */ + +struct gb_svc_intf_oops_request { + __u8 intf_id; + __u8 reason; +} __packed; +/* intf_oops response has no payload */ + + +/* RAW */ + +/* Greybus raw request types */ +#define GB_RAW_TYPE_SEND 0x02 + +struct gb_raw_send_request { + __le32 len; + __u8 data[0]; +} __packed; + + +/* UART */ + +/* Greybus UART operation types */ +#define GB_UART_TYPE_SEND_DATA 0x02 +#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */ +#define GB_UART_TYPE_SET_LINE_CODING 0x04 +#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05 +#define GB_UART_TYPE_SEND_BREAK 0x06 +#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */ +#define GB_UART_TYPE_RECEIVE_CREDITS 0x08 +#define GB_UART_TYPE_FLUSH_FIFOS 0x09 + +/* Represents data from AP -> Module */ +struct gb_uart_send_data_request { + __le16 size; + __u8 data[0]; +} __packed; + +/* 
recv-data-request flags */ +#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */ +#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */ +#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */ +#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */ + +/* Represents data from Module -> AP */ +struct gb_uart_recv_data_request { + __le16 size; + __u8 flags; + __u8 data[0]; +} __packed; + +struct gb_uart_receive_credits_request { + __le16 count; +} __packed; + +struct gb_uart_set_line_coding_request { + __le32 rate; + __u8 format; +#define GB_SERIAL_1_STOP_BITS 0 +#define GB_SERIAL_1_5_STOP_BITS 1 +#define GB_SERIAL_2_STOP_BITS 2 + + __u8 parity; +#define GB_SERIAL_NO_PARITY 0 +#define GB_SERIAL_ODD_PARITY 1 +#define GB_SERIAL_EVEN_PARITY 2 +#define GB_SERIAL_MARK_PARITY 3 +#define GB_SERIAL_SPACE_PARITY 4 + + __u8 data_bits; + + __u8 flow_control; +#define GB_SERIAL_AUTO_RTSCTS_EN 0x1 +} __packed; + +/* output control lines */ +#define GB_UART_CTRL_DTR 0x01 +#define GB_UART_CTRL_RTS 0x02 + +struct gb_uart_set_control_line_state_request { + __u8 control; +} __packed; + +struct gb_uart_set_break_request { + __u8 state; +} __packed; + +/* input control lines and line errors */ +#define GB_UART_CTRL_DCD 0x01 +#define GB_UART_CTRL_DSR 0x02 +#define GB_UART_CTRL_RI 0x04 + +struct gb_uart_serial_state_request { + __u8 control; +} __packed; + +struct gb_uart_serial_flush_request { + __u8 flags; +#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01 +#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02 +} __packed; + +/* Loopback */ + +/* Greybus loopback request types */ +#define GB_LOOPBACK_TYPE_PING 0x02 +#define GB_LOOPBACK_TYPE_TRANSFER 0x03 +#define GB_LOOPBACK_TYPE_SINK 0x04 + +/* + * Loopback request/response header format should be identical + * to simplify bandwidth and data movement analysis. 
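+ * Accordingly, the transfer request and response below deliberately
+ * share the same len/reserved0/reserved1/data layout.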
+ */ +struct gb_loopback_transfer_request { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +struct gb_loopback_transfer_response { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +/* SDIO */ +/* Greybus SDIO operation types */ +#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02 +#define GB_SDIO_TYPE_SET_IOS 0x03 +#define GB_SDIO_TYPE_COMMAND 0x04 +#define GB_SDIO_TYPE_TRANSFER 0x05 +#define GB_SDIO_TYPE_EVENT 0x06 + +/* get caps response: request has no payload */ +struct gb_sdio_get_caps_response { + __le32 caps; +#define GB_SDIO_CAP_NONREMOVABLE 0x00000001 +#define GB_SDIO_CAP_4_BIT_DATA 0x00000002 +#define GB_SDIO_CAP_8_BIT_DATA 0x00000004 +#define GB_SDIO_CAP_MMC_HS 0x00000008 +#define GB_SDIO_CAP_SD_HS 0x00000010 +#define GB_SDIO_CAP_ERASE 0x00000020 +#define GB_SDIO_CAP_1_2V_DDR 0x00000040 +#define GB_SDIO_CAP_1_8V_DDR 0x00000080 +#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100 +#define GB_SDIO_CAP_UHS_SDR12 0x00000200 +#define GB_SDIO_CAP_UHS_SDR25 0x00000400 +#define GB_SDIO_CAP_UHS_SDR50 0x00000800 +#define GB_SDIO_CAP_UHS_SDR104 0x00001000 +#define GB_SDIO_CAP_UHS_DDR50 0x00002000 +#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000 +#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000 +#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000 +#define GB_SDIO_CAP_HS200_1_2V 0x00020000 +#define GB_SDIO_CAP_HS200_1_8V 0x00040000 +#define GB_SDIO_CAP_HS400_1_2V 0x00080000 +#define GB_SDIO_CAP_HS400_1_8V 0x00100000 + + /* see possible values below at vdd */ + __le32 ocr; + __le32 f_min; + __le32 f_max; + __le16 max_blk_count; + __le16 max_blk_size; +} __packed; + +/* set ios request: response has no payload */ +struct gb_sdio_set_ios_request { + __le32 clock; + __le32 vdd; +#define GB_SDIO_VDD_165_195 0x00000001 +#define GB_SDIO_VDD_20_21 0x00000002 +#define GB_SDIO_VDD_21_22 0x00000004 +#define GB_SDIO_VDD_22_23 0x00000008 +#define GB_SDIO_VDD_23_24 0x00000010 +#define GB_SDIO_VDD_24_25 0x00000020 +#define GB_SDIO_VDD_25_26 0x00000040 +#define GB_SDIO_VDD_26_27 0x00000080 +#define GB_SDIO_VDD_27_28 0x00000100 +#define GB_SDIO_VDD_28_29 0x00000200 +#define GB_SDIO_VDD_29_30 0x00000400 +#define GB_SDIO_VDD_30_31 0x00000800 +#define GB_SDIO_VDD_31_32 0x00001000 +#define GB_SDIO_VDD_32_33 0x00002000 +#define GB_SDIO_VDD_33_34 0x00004000 +#define GB_SDIO_VDD_34_35 0x00008000 +#define GB_SDIO_VDD_35_36 0x00010000 + + __u8 bus_mode; +#define GB_SDIO_BUSMODE_OPENDRAIN 0x00 +#define GB_SDIO_BUSMODE_PUSHPULL 0x01 + + __u8 power_mode; +#define GB_SDIO_POWER_OFF 0x00 +#define GB_SDIO_POWER_UP 0x01 +#define GB_SDIO_POWER_ON 0x02 +#define GB_SDIO_POWER_UNDEFINED 0x03 + + __u8 bus_width; +#define GB_SDIO_BUS_WIDTH_1 0x00 +#define GB_SDIO_BUS_WIDTH_4 0x02 +#define GB_SDIO_BUS_WIDTH_8 0x03 + + __u8 timing; +#define GB_SDIO_TIMING_LEGACY 0x00 +#define GB_SDIO_TIMING_MMC_HS 0x01 +#define GB_SDIO_TIMING_SD_HS 0x02 +#define GB_SDIO_TIMING_UHS_SDR12 0x03 +#define GB_SDIO_TIMING_UHS_SDR25 0x04 +#define GB_SDIO_TIMING_UHS_SDR50 0x05 +#define GB_SDIO_TIMING_UHS_SDR104 0x06 +#define GB_SDIO_TIMING_UHS_DDR50 0x07 +#define GB_SDIO_TIMING_MMC_DDR52 0x08 +#define GB_SDIO_TIMING_MMC_HS200 0x09 +#define GB_SDIO_TIMING_MMC_HS400 0x0A + + __u8 signal_voltage; +#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00 +#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01 +#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02 + + __u8 drv_type; +#define GB_SDIO_SET_DRIVER_TYPE_B 0x00 +#define GB_SDIO_SET_DRIVER_TYPE_A 0x01 +#define GB_SDIO_SET_DRIVER_TYPE_C 0x02 +#define GB_SDIO_SET_DRIVER_TYPE_D 0x03 +} __packed; + +/* 
command request */ +struct gb_sdio_command_request { + __u8 cmd; + __u8 cmd_flags; +#define GB_SDIO_RSP_NONE 0x00 +#define GB_SDIO_RSP_PRESENT 0x01 +#define GB_SDIO_RSP_136 0x02 +#define GB_SDIO_RSP_CRC 0x04 +#define GB_SDIO_RSP_BUSY 0x08 +#define GB_SDIO_RSP_OPCODE 0x10 + + __u8 cmd_type; +#define GB_SDIO_CMD_AC 0x00 +#define GB_SDIO_CMD_ADTC 0x01 +#define GB_SDIO_CMD_BC 0x02 +#define GB_SDIO_CMD_BCR 0x03 + + __le32 cmd_arg; + __le16 data_blocks; + __le16 data_blksz; +} __packed; + +struct gb_sdio_command_response { + __le32 resp[4]; +} __packed; + +/* transfer request */ +struct gb_sdio_transfer_request { + __u8 data_flags; +#define GB_SDIO_DATA_WRITE 0x01 +#define GB_SDIO_DATA_READ 0x02 +#define GB_SDIO_DATA_STREAM 0x04 + + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +struct gb_sdio_transfer_response { + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +/* event request: generated by module and is defined as unidirectional */ +struct gb_sdio_event_request { + __u8 event; +#define GB_SDIO_CARD_INSERTED 0x01 +#define GB_SDIO_CARD_REMOVED 0x02 +#define GB_SDIO_WP 0x04 +} __packed; + +/* Camera */ + +/* Greybus Camera request types */ +#define GB_CAMERA_TYPE_CAPABILITIES 0x02 +#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03 +#define GB_CAMERA_TYPE_CAPTURE 0x04 +#define GB_CAMERA_TYPE_FLUSH 0x05 +#define GB_CAMERA_TYPE_METADATA 0x06 + +#define GB_CAMERA_MAX_STREAMS 4 +#define GB_CAMERA_MAX_SETTINGS_SIZE 8192 + +/* Greybus Camera Configure Streams request payload */ +struct gb_camera_stream_config_request { + __le16 width; + __le16 height; + __le16 format; + __le16 padding; +} __packed; + +struct gb_camera_configure_streams_request { + __u8 num_streams; + __u8 flags; +#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 + __le16 padding; + struct gb_camera_stream_config_request config[0]; +} __packed; + +/* Greybus Camera Configure Streams response payload */ +struct gb_camera_stream_config_response { + __le16 width; + __le16 height; + __le16 format; + __u8 virtual_channel; + __u8 data_type[2]; + __le16 max_pkt_size; + __u8 padding; + __le32 max_size; +} __packed; + +struct gb_camera_configure_streams_response { + __u8 num_streams; +#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01 + __u8 flags; + __u8 padding[2]; + __le32 data_rate; + struct gb_camera_stream_config_response config[0]; +}; + +/* Greybus Camera Capture request payload - response has no payload */ +struct gb_camera_capture_request { + __le32 request_id; + __u8 streams; + __u8 padding; + __le16 num_frames; + __u8 settings[0]; +} __packed; + +/* Greybus Camera Flush response payload - request has no payload */ +struct gb_camera_flush_response { + __le32 request_id; +} __packed; + +/* Greybus Camera Metadata request payload - operation has no response */ +struct gb_camera_metadata_request { + __le32 request_id; + __le16 frame_number; + __u8 stream; + __u8 padding; + __u8 metadata[0]; +} __packed; + +/* Lights */ + +/* Greybus Lights request types */ +#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02 +#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03 +#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04 +#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05 +#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06 +#define GB_LIGHTS_TYPE_SET_BLINK 0x07 +#define GB_LIGHTS_TYPE_SET_COLOR 0x08 +#define GB_LIGHTS_TYPE_SET_FADE 0x09 +#define GB_LIGHTS_TYPE_EVENT 0x0A +#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B +#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C +#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D +#define 
GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E + +/* Greybus Light modes */ + +/* + * if you add any specific mode below, update also the + * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly + */ +#define GB_CHANNEL_MODE_NONE 0x00000000 +#define GB_CHANNEL_MODE_BATTERY 0x00000001 +#define GB_CHANNEL_MODE_POWER 0x00000002 +#define GB_CHANNEL_MODE_WIRELESS 0x00000004 +#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008 +#define GB_CHANNEL_MODE_KEYBOARD 0x00000010 +#define GB_CHANNEL_MODE_BUTTONS 0x00000020 +#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040 +#define GB_CHANNEL_MODE_ATTENTION 0x00000080 +#define GB_CHANNEL_MODE_FLASH 0x00000100 +#define GB_CHANNEL_MODE_TORCH 0x00000200 +#define GB_CHANNEL_MODE_INDICATOR 0x00000400 + +/* Lights Mode valid bit values */ +#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF +#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000 + +/* Greybus Light Channels Flags */ +#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001 +#define GB_LIGHT_CHANNEL_FADER 0x00000002 +#define GB_LIGHT_CHANNEL_BLINK 0x00000004 + +/* get count of lights in module */ +struct gb_lights_get_lights_response { + __u8 lights_count; +} __packed; + +/* light config request payload */ +struct gb_lights_get_light_config_request { + __u8 id; +} __packed; + +/* light config response payload */ +struct gb_lights_get_light_config_response { + __u8 channel_count; + __u8 name[32]; +} __packed; + +/* channel config request payload */ +struct gb_lights_get_channel_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel flash config request payload */ +struct gb_lights_get_channel_flash_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel config response payload */ +struct gb_lights_get_channel_config_response { + __u8 max_brightness; + __le32 flags; + __le32 color; + __u8 color_name[32]; + __le32 mode; + __u8 mode_name[32]; +} __packed; + +/* channel flash config response payload */ +struct gb_lights_get_channel_flash_config_response { + __le32 intensity_min_uA; + __le32 intensity_max_uA; + __le32 intensity_step_uA; + __le32 timeout_min_us; + __le32 timeout_max_us; + __le32 timeout_step_us; +} __packed; + +/* blink request payload: response have no payload */ +struct gb_lights_blink_request { + __u8 light_id; + __u8 channel_id; + __le16 time_on_ms; + __le16 time_off_ms; +} __packed; + +/* set brightness request payload: response have no payload */ +struct gb_lights_set_brightness_request { + __u8 light_id; + __u8 channel_id; + __u8 brightness; +} __packed; + +/* set color request payload: response have no payload */ +struct gb_lights_set_color_request { + __u8 light_id; + __u8 channel_id; + __le32 color; +} __packed; + +/* set fade request payload: response have no payload */ +struct gb_lights_set_fade_request { + __u8 light_id; + __u8 channel_id; + __u8 fade_in; + __u8 fade_out; +} __packed; + +/* event request: generated by module */ +struct gb_lights_event_request { + __u8 light_id; + __u8 event; +#define GB_LIGHTS_LIGHT_CONFIG 0x01 +} __packed; + +/* set flash intensity request payload: response have no payload */ +struct gb_lights_set_flash_intensity_request { + __u8 light_id; + __u8 channel_id; + __le32 intensity_uA; +} __packed; + +/* set flash strobe state request payload: response have no payload */ +struct gb_lights_set_flash_strobe_request { + __u8 light_id; + __u8 channel_id; + __u8 state; +} __packed; + +/* set flash timeout request payload: response have no payload */ +struct gb_lights_set_flash_timeout_request { + __u8 light_id; + __u8 channel_id; + __le32 
timeout_us; +} __packed; + +/* get flash fault request payload */ +struct gb_lights_get_flash_fault_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* get flash fault response payload */ +struct gb_lights_get_flash_fault_response { + __le32 fault; +#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000 +#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001 +#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002 +#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004 +#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008 +#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010 +#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020 +#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040 +#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080 +} __packed; + +/* Audio */ + +#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02 +#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03 +#define GB_AUDIO_TYPE_GET_CONTROL 0x04 +#define GB_AUDIO_TYPE_SET_CONTROL 0x05 +#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06 +#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07 +#define GB_AUDIO_TYPE_GET_PCM 0x08 +#define GB_AUDIO_TYPE_SET_PCM 0x09 +#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a + /* 0x0b unused */ +#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c +#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d +#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e + /* 0x0f unused */ +#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10 +#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11 +#define GB_AUDIO_TYPE_JACK_EVENT 0x12 +#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13 +#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14 +#define GB_AUDIO_TYPE_SEND_DATA 0x15 + +/* Module must be able to buffer 10ms of audio data, minimum */ +#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000 + +#define GB_AUDIO_PCM_NAME_MAX 32 +#define AUDIO_DAI_NAME_MAX 32 +#define AUDIO_CONTROL_NAME_MAX 32 +#define AUDIO_CTL_ELEM_NAME_MAX 44 +#define AUDIO_ENUM_NAME_MAX 64 +#define AUDIO_WIDGET_NAME_MAX 32 + +/* See SNDRV_PCM_FMTBIT_* in Linux source */ +#define GB_AUDIO_PCM_FMT_S8 BIT(0) +#define GB_AUDIO_PCM_FMT_U8 BIT(1) +#define GB_AUDIO_PCM_FMT_S16_LE BIT(2) +#define GB_AUDIO_PCM_FMT_S16_BE BIT(3) +#define GB_AUDIO_PCM_FMT_U16_LE BIT(4) +#define GB_AUDIO_PCM_FMT_U16_BE BIT(5) +#define GB_AUDIO_PCM_FMT_S24_LE BIT(6) +#define GB_AUDIO_PCM_FMT_S24_BE BIT(7) +#define GB_AUDIO_PCM_FMT_U24_LE BIT(8) +#define GB_AUDIO_PCM_FMT_U24_BE BIT(9) +#define GB_AUDIO_PCM_FMT_S32_LE BIT(10) +#define GB_AUDIO_PCM_FMT_S32_BE BIT(11) +#define GB_AUDIO_PCM_FMT_U32_LE BIT(12) +#define GB_AUDIO_PCM_FMT_U32_BE BIT(13) + +/* See SNDRV_PCM_RATE_* in Linux source */ +#define GB_AUDIO_PCM_RATE_5512 BIT(0) +#define GB_AUDIO_PCM_RATE_8000 BIT(1) +#define GB_AUDIO_PCM_RATE_11025 BIT(2) +#define GB_AUDIO_PCM_RATE_16000 BIT(3) +#define GB_AUDIO_PCM_RATE_22050 BIT(4) +#define GB_AUDIO_PCM_RATE_32000 BIT(5) +#define GB_AUDIO_PCM_RATE_44100 BIT(6) +#define GB_AUDIO_PCM_RATE_48000 BIT(7) +#define GB_AUDIO_PCM_RATE_64000 BIT(8) +#define GB_AUDIO_PCM_RATE_88200 BIT(9) +#define GB_AUDIO_PCM_RATE_96000 BIT(10) +#define GB_AUDIO_PCM_RATE_176400 BIT(11) +#define GB_AUDIO_PCM_RATE_192000 BIT(12) + +#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1 +#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2 + +#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0) +#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1) + +/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */ +#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02 +#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06 + +/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */ +#define 
GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00 +#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01 +#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02 +#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03 +#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04 +#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05 +#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06 + +/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */ +#define GB_AUDIO_ACCESS_READ BIT(0) +#define GB_AUDIO_ACCESS_WRITE BIT(1) +#define GB_AUDIO_ACCESS_VOLATILE BIT(2) +#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3) +#define GB_AUDIO_ACCESS_TLV_READ BIT(4) +#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5) +#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6) +#define GB_AUDIO_ACCESS_INACTIVE BIT(7) +#define GB_AUDIO_ACCESS_LOCK BIT(8) +#define GB_AUDIO_ACCESS_OWNER BIT(9) + +/* enum snd_soc_dapm_type */ +#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0 +#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1 +#define GB_AUDIO_WIDGET_TYPE_MUX 0x2 +#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3 +#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4 +#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5 +#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6 +#define GB_AUDIO_WIDGET_TYPE_PGA 0x7 +#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8 +#define GB_AUDIO_WIDGET_TYPE_ADC 0x9 +#define GB_AUDIO_WIDGET_TYPE_DAC 0xa +#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb +#define GB_AUDIO_WIDGET_TYPE_MIC 0xc +#define GB_AUDIO_WIDGET_TYPE_HP 0xd +#define GB_AUDIO_WIDGET_TYPE_SPK 0xe +#define GB_AUDIO_WIDGET_TYPE_LINE 0xf +#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10 +#define GB_AUDIO_WIDGET_TYPE_VMID 0x11 +#define GB_AUDIO_WIDGET_TYPE_PRE 0x12 +#define GB_AUDIO_WIDGET_TYPE_POST 0x13 +#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14 +#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15 +#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16 +#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17 +#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18 +#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19 +#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a +#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b +#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c + +#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01 +#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02 + +#define GB_AUDIO_JACK_EVENT_INSERTION 0x1 +#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2 + +#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1 +#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2 + +#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1 +#define GB_AUDIO_STREAMING_EVENT_HALT 0x2 +#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3 +#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4 +#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5 +#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6 +#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7 +#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8 +#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9 + +#define GB_AUDIO_INVALID_INDEX 0xff + +/* enum snd_jack_types */ +#define GB_AUDIO_JACK_HEADPHONE 0x0000001 +#define GB_AUDIO_JACK_MICROPHONE 0x0000002 +#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE) +#define GB_AUDIO_JACK_LINEOUT 0x0000004 +#define GB_AUDIO_JACK_MECHANICAL 0x0000008 +#define GB_AUDIO_JACK_VIDEOOUT 0x0000010 +#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \ + GB_AUDIO_JACK_VIDEOOUT) +#define GB_AUDIO_JACK_LINEIN 0x0000020 +#define GB_AUDIO_JACK_OC_HPHL 0x0000040 +#define GB_AUDIO_JACK_OC_HPHR 0x0000080 +#define GB_AUDIO_JACK_MICROPHONE2 0x0000200 +#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE | \ + GB_AUDIO_JACK_MICROPHONE2) +/* Kept separate from switches to facilitate implementation */ +#define GB_AUDIO_JACK_BTN_0 0x4000000 +#define 
GB_AUDIO_JACK_BTN_1 0x2000000 +#define GB_AUDIO_JACK_BTN_2 0x1000000 +#define GB_AUDIO_JACK_BTN_3 0x0800000 + +struct gb_audio_pcm { + __u8 stream_name[GB_AUDIO_PCM_NAME_MAX]; + __le32 formats; /* GB_AUDIO_PCM_FMT_* */ + __le32 rates; /* GB_AUDIO_PCM_RATE_* */ + __u8 chan_min; + __u8 chan_max; + __u8 sig_bits; /* number of bits of content */ +} __packed; + +struct gb_audio_dai { + __u8 name[AUDIO_DAI_NAME_MAX]; + __le16 data_cport; + struct gb_audio_pcm capture; + struct gb_audio_pcm playback; +} __packed; + +struct gb_audio_integer { + __le32 min; + __le32 max; + __le32 step; +} __packed; + +struct gb_audio_integer64 { + __le64 min; + __le64 max; + __le64 step; +} __packed; + +struct gb_audio_enumerated { + __le32 items; + __le16 names_length; + __u8 names[0]; +} __packed; + +struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ + __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */ + __le16 dimen[4]; + union { + struct gb_audio_integer integer; + struct gb_audio_integer64 integer64; + struct gb_audio_enumerated enumerated; + } value; +} __packed; + +struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */ + __le64 timestamp; /* XXX needed? */ + union { + __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */ + __le64 integer64_value[2]; + __le32 enumerated_item[2]; + } value; +} __packed; + +struct gb_audio_control { + __u8 name[AUDIO_CONTROL_NAME_MAX]; + __u8 id; /* 0-63 */ + __u8 iface; /* GB_AUDIO_IFACE_* */ + __le16 data_cport; + __le32 access; /* GB_AUDIO_ACCESS_* */ + __u8 count; /* count of same elements */ + __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */ + struct gb_audio_ctl_elem_info info; +} __packed; + +struct gb_audio_widget { + __u8 name[AUDIO_WIDGET_NAME_MAX]; + __u8 sname[AUDIO_WIDGET_NAME_MAX]; + __u8 id; + __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ + __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ + __u8 ncontrols; + struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ +} __packed; + +struct gb_audio_route { + __u8 source_id; /* widget id */ + __u8 destination_id; /* widget id */ + __u8 control_id; /* 0-63 */ + __u8 index; /* Selection within the control */ +} __packed; + +struct gb_audio_topology { + __u8 num_dais; + __u8 num_controls; + __u8 num_widgets; + __u8 num_routes; + __le32 size_dais; + __le32 size_controls; + __le32 size_widgets; + __le32 size_routes; + __le32 jack_type; + /* + * struct gb_audio_dai dai[num_dais]; + * struct gb_audio_control controls[num_controls]; + * struct gb_audio_widget widgets[num_widgets]; + * struct gb_audio_route routes[num_routes]; + */ + __u8 data[0]; +} __packed; + +struct gb_audio_get_topology_size_response { + __le16 size; +} __packed; + +struct gb_audio_get_topology_response { + struct gb_audio_topology topology; +} __packed; + +struct gb_audio_get_control_request { + __u8 control_id; + __u8 index; +} __packed; + +struct gb_audio_get_control_response { + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_set_control_request { + __u8 control_id; + __u8 index; + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_enable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_disable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_get_pcm_request { + __le16 data_cport; +} __packed; + +struct gb_audio_get_pcm_response { + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; +} __packed; + +struct gb_audio_set_pcm_request { + __le16 data_cport; + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; 
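+	/* i.e. the gb_audio_get_pcm_response fields, applied to data_cport */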
+} __packed; + +struct gb_audio_set_tx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_set_rx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_jack_event_request { + __u8 widget_id; + __u8 jack_attribute; + __u8 event; +} __packed; + +struct gb_audio_button_event_request { + __u8 widget_id; + __u8 button_id; + __u8 event; +} __packed; + +struct gb_audio_streaming_event_request { + __le16 data_cport; + __u8 event; +} __packed; + +struct gb_audio_send_data_request { + __le64 timestamp; + __u8 data[0]; +} __packed; + + +/* Log */ + +/* operations */ +#define GB_LOG_TYPE_SEND_LOG 0x02 + +/* length */ +#define GB_LOG_MAX_LEN 1024 + +struct gb_log_send_log_request { + __le16 len; + __u8 msg[0]; +} __packed; + +#endif /* __GREYBUS_PROTOCOLS_H */ + diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h new file mode 100644 index 000000000000..348b76fabc9a --- /dev/null +++ b/include/linux/greybus/hd.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Host Device + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#ifndef __HD_H +#define __HD_H + +struct gb_host_device; +struct gb_message; + +struct gb_hd_driver { + size_t hd_priv_size; + + int (*cport_allocate)(struct gb_host_device *hd, int cport_id, + unsigned long flags); + void (*cport_release)(struct gb_host_device *hd, u16 cport_id); + int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, + unsigned long flags); + int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); + int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); + int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); + int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, + u8 phase, unsigned int timeout); + int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, + size_t peer_space, unsigned int timeout); + int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); + + int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, + struct gb_message *message, gfp_t gfp_mask); + void (*message_cancel)(struct gb_message *message); + int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); + int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); + int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool async); +}; + +struct gb_host_device { + struct device dev; + int bus_id; + const struct gb_hd_driver *driver; + + struct list_head modules; + struct list_head connections; + struct ida cport_id_map; + + /* Number of CPorts supported by the UniPro IP */ + size_t num_cports; + + /* Host device buffer constraints */ + size_t buffer_size_max; + + struct gb_svc *svc; + /* Private data for the host driver */ + unsigned long hd_priv[0] __aligned(sizeof(s64)); +}; +#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) + +int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); +void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); +int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, + unsigned long flags); +void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); + +struct 
gb_host_device *gb_hd_create(struct gb_hd_driver *driver, + struct device *parent, + size_t buffer_size_max, + size_t num_cports); +int gb_hd_add(struct gb_host_device *hd); +void gb_hd_del(struct gb_host_device *hd); +void gb_hd_shutdown(struct gb_host_device *hd); +void gb_hd_put(struct gb_host_device *hd); +int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool in_irq); + +int gb_hd_init(void); +void gb_hd_exit(void); + +#endif /* __HD_H */ diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h new file mode 100644 index 000000000000..8fb1eacda302 --- /dev/null +++ b/include/linux/greybus/interface.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Interface Block code + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __INTERFACE_H +#define __INTERFACE_H + +enum gb_interface_type { + GB_INTERFACE_TYPE_INVALID = 0, + GB_INTERFACE_TYPE_UNKNOWN, + GB_INTERFACE_TYPE_DUMMY, + GB_INTERFACE_TYPE_UNIPRO, + GB_INTERFACE_TYPE_GREYBUS, +}; + +#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0) +#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) +#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2) +#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) +#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4) +#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5) +#define GB_INTERFACE_QUIRK_NO_PM BIT(6) + +struct gb_interface { + struct device dev; + struct gb_control *control; + + struct list_head bundles; + struct list_head module_node; + struct list_head manifest_descs; + u8 interface_id; /* Physical location within the Endo */ + u8 device_id; + u8 features; /* Feature flags set in the manifest */ + + enum gb_interface_type type; + + u32 ddbl1_manufacturer_id; + u32 ddbl1_product_id; + u32 vendor_id; + u32 product_id; + u64 serial_number; + + struct gb_host_device *hd; + struct gb_module *module; + + unsigned long quirks; + + struct mutex mutex; + + bool disconnected; + + bool ejected; + bool removed; + bool active; + bool enabled; + bool mode_switch; + bool dme_read; + + struct work_struct mode_switch_work; + struct completion mode_switch_completion; +}; +#define to_gb_interface(d) container_of(d, struct gb_interface, dev) + +struct gb_interface *gb_interface_create(struct gb_module *module, + u8 interface_id); +int gb_interface_activate(struct gb_interface *intf); +void gb_interface_deactivate(struct gb_interface *intf); +int gb_interface_enable(struct gb_interface *intf); +void gb_interface_disable(struct gb_interface *intf); +int gb_interface_add(struct gb_interface *intf); +void gb_interface_del(struct gb_interface *intf); +void gb_interface_put(struct gb_interface *intf); +void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, + u32 mailbox); + +int gb_interface_request_mode_switch(struct gb_interface *intf); + +#endif /* __INTERFACE_H */ diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h new file mode 100644 index 000000000000..88aa7e44cad5 --- /dev/null +++ b/include/linux/greybus/manifest.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest parsing + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __MANIFEST_H +#define __MANIFEST_H + +struct gb_interface; +bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); + +#endif /* __MANIFEST_H */ diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h new file mode 100644 index 000000000000..2a27e520ee94 --- /dev/null +++ b/include/linux/greybus/module.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Module code + * + * Copyright 2016 Google Inc. + * Copyright 2016 Linaro Ltd. + */ + +#ifndef __MODULE_H +#define __MODULE_H + +struct gb_module { + struct device dev; + struct gb_host_device *hd; + + struct list_head hd_node; + + u8 module_id; + size_t num_interfaces; + + bool disconnected; + + struct gb_interface *interfaces[0]; +}; +#define to_gb_module(d) container_of(d, struct gb_module, dev) + +struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, + size_t num_interfaces); +int gb_module_add(struct gb_module *module); +void gb_module_del(struct gb_module *module); +void gb_module_put(struct gb_module *module); + +#endif /* __MODULE_H */ diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h new file mode 100644 index 000000000000..17ba3daf111b --- /dev/null +++ b/include/linux/greybus/operation.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus operations + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __OPERATION_H +#define __OPERATION_H + +#include + +struct gb_operation; + +/* The default amount of time a request is given to complete */ +#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */ + +/* + * The top bit of the type in an operation message header indicates + * whether the message is a request (bit clear) or response (bit set) + */ +#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80) + +enum gb_operation_result { + GB_OP_SUCCESS = 0x00, + GB_OP_INTERRUPTED = 0x01, + GB_OP_TIMEOUT = 0x02, + GB_OP_NO_MEMORY = 0x03, + GB_OP_PROTOCOL_BAD = 0x04, + GB_OP_OVERFLOW = 0x05, + GB_OP_INVALID = 0x06, + GB_OP_RETRY = 0x07, + GB_OP_NONEXISTENT = 0x08, + GB_OP_UNKNOWN_ERROR = 0xfe, + GB_OP_MALFUNCTION = 0xff, +}; + +#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr) +#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX + +/* + * Protocol code should only examine the payload and payload_size fields, and + * host-controller drivers may use the hcpriv field. All other fields are + * intended to be private to the operations core code. + */ +struct gb_message { + struct gb_operation *operation; + struct gb_operation_msg_hdr *header; + + void *payload; + size_t payload_size; + + void *buffer; + + void *hcpriv; +}; + +#define GB_OPERATION_FLAG_INCOMING BIT(0) +#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1) +#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2) +#define GB_OPERATION_FLAG_CORE BIT(3) + +#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \ + GB_OPERATION_FLAG_UNIDIRECTIONAL) + +/* + * A Greybus operation is a remote procedure call performed over a + * connection between two UniPro interfaces. + * + * Every operation consists of a request message sent to the other + * end of the connection coupled with a reply message returned to + * the sender. Every operation has a type, whose interpretation is + * dependent on the protocol associated with the connection. 
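+ *
+ * As a minimal illustrative sketch (GB_FOO_TYPE_BAR and the
+ * request/response variables are hypothetical, protocol-specific
+ * placeholders), a protocol handler typically issues a blocking
+ * request with gb_operation_sync(), declared below:
+ *
+ *	ret = gb_operation_sync(connection, GB_FOO_TYPE_BAR,
+ *				&request, sizeof(request),
+ *				&response, sizeof(response));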
+ * + * Only four things in an operation structure are intended to be + * directly usable by protocol handlers: the operation's connection + * pointer; the operation type; the request message payload (and + * size); and the response message payload (and size). Note that a + * message with a 0-byte payload has a null message payload pointer. + * + * In addition, every operation has a result, which is an errno + * value. Protocol handlers access the operation result using + * gb_operation_result(). + */ +typedef void (*gb_operation_callback)(struct gb_operation *); +struct gb_operation { + struct gb_connection *connection; + struct gb_message *request; + struct gb_message *response; + + unsigned long flags; + u8 type; + u16 id; + int errno; /* Operation result */ + + struct work_struct work; + gb_operation_callback callback; + struct completion completion; + struct timer_list timer; + + struct kref kref; + atomic_t waiters; + + int active; + struct list_head links; /* connection->operations */ + + void *private; +}; + +static inline bool +gb_operation_is_incoming(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_INCOMING; +} + +static inline bool +gb_operation_is_unidirectional(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL; +} + +static inline bool +gb_operation_short_response_allowed(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE; +} + +static inline bool gb_operation_is_core(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_CORE; +} + +void gb_connection_recv(struct gb_connection *connection, + void *data, size_t size); + +int gb_operation_result(struct gb_operation *operation); + +size_t gb_operation_get_payload_size_max(struct gb_connection *connection); +struct gb_operation * +gb_operation_create_flags(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +static inline struct gb_operation * +gb_operation_create(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, gfp_t gfp) +{ + return gb_operation_create_flags(connection, type, request_size, + response_size, 0, gfp); +} + +struct gb_operation * +gb_operation_create_core(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +void gb_operation_get(struct gb_operation *operation); +void gb_operation_put(struct gb_operation *operation); + +bool gb_operation_response_alloc(struct gb_operation *operation, + size_t response_size, gfp_t gfp); + +int gb_operation_request_send(struct gb_operation *operation, + gb_operation_callback callback, + unsigned int timeout, + gfp_t gfp); +int gb_operation_request_send_sync_timeout(struct gb_operation *operation, + unsigned int timeout); +static inline int +gb_operation_request_send_sync(struct gb_operation *operation) +{ + return gb_operation_request_send_sync_timeout(operation, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +void gb_operation_cancel(struct gb_operation *operation, int errno); +void gb_operation_cancel_incoming(struct gb_operation *operation, int errno); + +void greybus_message_sent(struct gb_host_device *hd, + struct gb_message *message, int status); + +int gb_operation_sync_timeout(struct gb_connection *connection, int type, + void *request, int request_size, + void *response, int response_size, + unsigned int timeout); +int 
gb_operation_unidirectional_timeout(struct gb_connection *connection, + int type, void *request, int request_size, + unsigned int timeout); + +static inline int gb_operation_sync(struct gb_connection *connection, int type, + void *request, int request_size, + void *response, int response_size) +{ + return gb_operation_sync_timeout(connection, type, + request, request_size, response, response_size, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline int gb_operation_unidirectional(struct gb_connection *connection, + int type, void *request, int request_size) +{ + return gb_operation_unidirectional_timeout(connection, type, + request, request_size, GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline void *gb_operation_get_data(struct gb_operation *operation) +{ + return operation->private; +} + +static inline void gb_operation_set_data(struct gb_operation *operation, + void *data) +{ + operation->private = data; +} + +int gb_operation_init(void); +void gb_operation_exit(void); + +#endif /* !__OPERATION_H */ diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h new file mode 100644 index 000000000000..e7452057cfe4 --- /dev/null +++ b/include/linux/greybus/svc.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus SVC code + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __SVC_H +#define __SVC_H + +#define GB_SVC_CPORT_FLAG_E2EFC BIT(0) +#define GB_SVC_CPORT_FLAG_CSD_N BIT(1) +#define GB_SVC_CPORT_FLAG_CSV_N BIT(2) + +enum gb_svc_state { + GB_SVC_STATE_RESET, + GB_SVC_STATE_PROTOCOL_VERSION, + GB_SVC_STATE_SVC_HELLO, +}; + +enum gb_svc_watchdog_bite { + GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0, + GB_SVC_WATCHDOG_BITE_PANIC_KERNEL, +}; + +struct gb_svc_watchdog; + +struct svc_debugfs_pwrmon_rail { + u8 id; + struct gb_svc *svc; +}; + +struct gb_svc { + struct device dev; + + struct gb_host_device *hd; + struct gb_connection *connection; + enum gb_svc_state state; + struct ida device_id_map; + struct workqueue_struct *wq; + + u16 endo_id; + u8 ap_intf_id; + + u8 protocol_major; + u8 protocol_minor; + + struct gb_svc_watchdog *watchdog; + enum gb_svc_watchdog_bite action; + + struct dentry *debugfs_dentry; + struct svc_debugfs_pwrmon_rail *pwrmon_rails; +}; +#define to_gb_svc(d) container_of(d, struct gb_svc, dev) + +struct gb_svc *gb_svc_create(struct gb_host_device *hd); +int gb_svc_add(struct gb_svc *svc); +void gb_svc_del(struct gb_svc *svc); +void gb_svc_put(struct gb_svc *svc); + +int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, + u8 measurement_type, u32 *value); +int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id); +int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, + u8 intf2_id, u8 dev2_id); +void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id); +int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id, u8 cport_flags); +void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id); +int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id); +int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type); +int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id); + +int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 
selector, + u32 *value); +int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 value); +int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, + u8 tx_mode, u8 tx_gear, u8 tx_nlanes, + u8 tx_amplitude, u8 tx_hs_equalizer, + u8 rx_mode, u8 rx_gear, u8 rx_nlanes, + u8 flags, u32 quirks, + struct gb_svc_l2_timer_cfg *local, + struct gb_svc_l2_timer_cfg *remote); +int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id); +int gb_svc_ping(struct gb_svc *svc); +int gb_svc_watchdog_create(struct gb_svc *svc); +void gb_svc_watchdog_destroy(struct gb_svc *svc); +bool gb_svc_watchdog_enabled(struct gb_svc *svc); +int gb_svc_watchdog_enable(struct gb_svc *svc); +int gb_svc_watchdog_disable(struct gb_svc *svc); + +int gb_svc_protocol_init(void); +void gb_svc_protocol_exit(void); + +#endif /* __SVC_H */ -- cgit v1.2.3-71-gd317 From c10bf3921e743dadb11a6cf59ffaf38cdbeb281b Mon Sep 17 00:00:00 2001 From: Rui Miguel Silva Date: Tue, 27 Aug 2019 16:53:02 +0100 Subject: staging: greybus: add missing includes Before moving greybus core out of staging and moving header files to include/linux some greybus header files were missing the necessary includes. This would trigger compilation faillures with some example errors logged bellow for with CONFIG_KERNEL_HEADER_TEST=y. So, add the necessary headers to compile clean before relocating the header files. ./include/linux/greybus/hd.h:23:50: error: unknown type name 'u16' int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); ^~~ ./include/linux/greybus/greybus_protocols.h:1314:2: error: unknown type name '__u8' __u8 data[0]; ^~~~ ./include/linux/greybus/hd.h:24:52: error: unknown type name 'u16' int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); ^~~ ./include/linux/greybus/hd.h:25:48: error: unknown type name 'u16' int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); ^~~ ./include/linux/greybus/hd.h:26:51: error: unknown type name 'u16' int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, ^~~ ./include/linux/greybus/hd.h:27:5: error: unknown type name 'u8' u8 phase, unsigned int timeout); ^~ ./include/linux/greybus/hd.h:28:50: error: unknown type name 'u16' int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, ^~~ ./include/linux/greybus/hd.h:29:5: error: unknown type name 'size_t' size_t peer_space, unsigned int timeout); ^~~~~~ ./include/linux/greybus/hd.h:29:5: note: 'size_t' is defined in header ''; did you forget to '#include '? 
./include/linux/greybus/hd.h:1:1: +#include /* SPDX-License-Identifier: GPL-2.0 */ ./include/linux/greybus/hd.h:29:5: size_t peer_space, unsigned int timeout); ^~~~~~ ./include/linux/greybus/hd.h:30:48: error: unknown type name 'u16' int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); ^~~ ./include/linux/greybus/hd.h:32:49: error: unknown type name 'u16' int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, ^~~ ./include/linux/greybus/hd.h:33:32: error: unknown type name 'gfp_t' struct gb_message *message, gfp_t gfp_mask); ^~~~~ ./include/linux/greybus/hd.h:35:55: error: unknown type name 'u16' int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); Reported-by: kbuild test robot Reported-by: Gao Xiang Signed-off-by: Rui Miguel Silva Signed-off-by: Rui Miguel Silva Link: https://lore.kernel.org/r/20190827155302.25704-1-rui.silva@linaro.org Signed-off-by: Greg Kroah-Hartman --- include/linux/greybus/bundle.h | 3 +++ include/linux/greybus/connection.h | 3 +++ include/linux/greybus/control.h | 3 +++ include/linux/greybus/greybus_manifest.h | 3 +++ include/linux/greybus/greybus_protocols.h | 2 ++ include/linux/greybus/hd.h | 3 +++ include/linux/greybus/interface.h | 3 +++ include/linux/greybus/manifest.h | 2 ++ include/linux/greybus/module.h | 3 +++ include/linux/greybus/operation.h | 5 +++++ include/linux/greybus/svc.h | 3 +++ 11 files changed, 33 insertions(+) (limited to 'include') diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h index 8734d2055657..df8d88424cb7 100644 --- a/include/linux/greybus/bundle.h +++ b/include/linux/greybus/bundle.h @@ -9,7 +9,10 @@ #ifndef __BUNDLE_H #define __BUNDLE_H +#include #include +#include +#include #define BUNDLE_ID_NONE U8_MAX diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h index 5ca3befc0636..d59b7fc1de3e 100644 --- a/include/linux/greybus/connection.h +++ b/include/linux/greybus/connection.h @@ -9,8 +9,11 @@ #ifndef __CONNECTION_H #define __CONNECTION_H +#include #include #include +#include +#include #define GB_CONNECTION_FLAG_CSD BIT(0) #define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h index 3a29ec05f631..da11fe871653 100644 --- a/include/linux/greybus/control.h +++ b/include/linux/greybus/control.h @@ -9,6 +9,9 @@ #ifndef __CONTROL_H #define __CONTROL_H +#include +#include + struct gb_control { struct device dev; struct gb_interface *intf; diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h index db68f7e7d5a7..6e62fe478712 100644 --- a/include/linux/greybus/greybus_manifest.h +++ b/include/linux/greybus/greybus_manifest.h @@ -14,6 +14,9 @@ #ifndef __GREYBUS_MANIFEST_H #define __GREYBUS_MANIFEST_H +#include +#include + enum greybus_descriptor_type { GREYBUS_TYPE_INVALID = 0x00, GREYBUS_TYPE_INTERFACE = 0x01, diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h index 5f34d1effb59..dfbc6c39a74b 100644 --- a/include/linux/greybus/greybus_protocols.h +++ b/include/linux/greybus/greybus_protocols.h @@ -7,6 +7,8 @@ #ifndef __GREYBUS_PROTOCOLS_H #define __GREYBUS_PROTOCOLS_H +#include + /* Fixed IDs for control/svc protocols */ /* SVC switch-port device ids */ diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h index 348b76fabc9a..d3faf0c1a569 100644 --- a/include/linux/greybus/hd.h +++ b/include/linux/greybus/hd.h @@ -9,6 +9,9 @@ #ifndef __HD_H #define __HD_H +#include +#include + 
struct gb_host_device; struct gb_message; diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h index 8fb1eacda302..ce4def881e6f 100644 --- a/include/linux/greybus/interface.h +++ b/include/linux/greybus/interface.h @@ -9,6 +9,9 @@ #ifndef __INTERFACE_H #define __INTERFACE_H +#include +#include + enum gb_interface_type { GB_INTERFACE_TYPE_INVALID = 0, GB_INTERFACE_TYPE_UNKNOWN, diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h index 88aa7e44cad5..830301b7a8bc 100644 --- a/include/linux/greybus/manifest.h +++ b/include/linux/greybus/manifest.h @@ -9,6 +9,8 @@ #ifndef __MANIFEST_H #define __MANIFEST_H +#include + struct gb_interface; bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h index 2a27e520ee94..47b839af145d 100644 --- a/include/linux/greybus/module.h +++ b/include/linux/greybus/module.h @@ -9,6 +9,9 @@ #ifndef __MODULE_H #define __MODULE_H +#include +#include + struct gb_module { struct device dev; struct gb_host_device *hd; diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h index 17ba3daf111b..8ca864bba23e 100644 --- a/include/linux/greybus/operation.h +++ b/include/linux/greybus/operation.h @@ -10,6 +10,11 @@ #define __OPERATION_H #include +#include +#include +#include +#include + struct gb_operation; diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h index e7452057cfe4..507f8bd4e4c8 100644 --- a/include/linux/greybus/svc.h +++ b/include/linux/greybus/svc.h @@ -9,6 +9,9 @@ #ifndef __SVC_H #define __SVC_H +#include +#include + #define GB_SVC_CPORT_FLAG_E2EFC BIT(0) #define GB_SVC_CPORT_FLAG_CSD_N BIT(1) #define GB_SVC_CPORT_FLAG_CSV_N BIT(2) -- cgit v1.2.3-71-gd317 From e003f9af9b8dc110dd62b7eaa3a796fd81d4d256 Mon Sep 17 00:00:00 2001 From: Rui Miguel Silva Date: Wed, 28 Aug 2019 13:48:25 +0100 Subject: staging: greybus: fix more header declarations More headers needed to be fixed when moving greybus out of staging and enabling the COMPILE_TEST option. Add forward declarations for the needed structures. Reported-by: kbuild test robot Signed-off-by: Rui Miguel Silva Reviewed-by: Johan Hovold Link: https://lore.kernel.org/r/20190828124825.20800-1-rui.silva@linaro.org Signed-off-by: Greg Kroah-Hartman --- include/linux/greybus/operation.h | 2 +- include/linux/greybus/svc.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h index 8ca864bba23e..cb8e4ef45222 100644 --- a/include/linux/greybus/operation.h +++ b/include/linux/greybus/operation.h @@ -15,7 +15,7 @@ #include #include - +struct gb_host_device; struct gb_operation; /* The default amount of time a request is given to complete */ diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h index 507f8bd4e4c8..5afaf5f06856 100644 --- a/include/linux/greybus/svc.h +++ b/include/linux/greybus/svc.h @@ -12,6 +12,8 @@ #include #include +struct gb_svc_l2_timer_cfg; + #define GB_SVC_CPORT_FLAG_E2EFC BIT(0) #define GB_SVC_CPORT_FLAG_CSD_N BIT(1) #define GB_SVC_CPORT_FLAG_CSV_N BIT(2) -- cgit v1.2.3-71-gd317 From a5876e24f13f13483fbd602b972d35801fb80b74 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 4 Sep 2019 10:08:56 +0800 Subject: erofs: use erofs_inode naming As Christoph suggested [1], "Why is this called vnode instead of inode? That seems like a rather odd naming for a Linux file system." 
[1] https://lore.kernel.org/r/20190829101545.GC20598@infradead.org/ Reported-by: Christoph Hellwig Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20190904020912.63925-10-gaoxiang25@huawei.com Signed-off-by: Greg Kroah-Hartman --- fs/erofs/data.c | 8 ++++---- fs/erofs/dir.c | 6 +++--- fs/erofs/inode.c | 12 ++++++------ fs/erofs/internal.h | 14 +++++++------- fs/erofs/namei.c | 2 +- fs/erofs/super.c | 10 +++++----- fs/erofs/xattr.c | 18 +++++++++--------- fs/erofs/xattr.h | 4 ++-- fs/erofs/zdata.c | 9 +++------ fs/erofs/zmap.c | 28 ++++++++++++++-------------- include/trace/events/erofs.h | 14 +++++++------- 11 files changed, 61 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 4d9b07991d07..be11b5ea9d2e 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -112,7 +112,7 @@ static int erofs_map_blocks_flatmode(struct inode *inode, int err = 0; erofs_blk_t nblocks, lastblk; u64 offset = map->m_la; - struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode *vi = EROFS_I(inode); bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE); trace_erofs_map_blocks_flatmode_enter(inode, map, flags); @@ -170,7 +170,7 @@ err_out: int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map, int flags) { - if (erofs_inode_is_data_compressed(EROFS_V(inode)->datalayout)) { + if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) { int err = z_erofs_map_blocks_iter(inode, map, flags); if (map->mpage) { @@ -365,7 +365,7 @@ static int erofs_raw_access_readpages(struct file *filp, if (IS_ERR(bio)) { pr_err("%s, readahead error at page %lu of nid %llu\n", __func__, page->index, - EROFS_V(mapping->host)->nid); + EROFS_I(mapping->host)->nid); bio = NULL; } @@ -404,7 +404,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; - if (EROFS_V(inode)->datalayout == EROFS_INODE_FLAT_INLINE) { + if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) { erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; if (block >> LOG_SECTORS_PER_BLOCK >= blks) diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index 6a5b43f7fb29..a032c8217071 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -47,7 +47,7 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, /* a corrupted entry is found */ if (nameoff + de_namelen > maxsize || de_namelen > EROFS_NAME_LEN) { - errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); + errln("bogus dirent @ nid %llu", EROFS_I(dir)->nid); DBG_BUGON(1); return -EFSCORRUPTED; } @@ -85,7 +85,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) break; } else if (IS_ERR(dentry_page)) { errln("fail to readdir of logical block %u of nid %llu", - i, EROFS_V(dir)->nid); + i, EROFS_I(dir)->nid); err = -EFSCORRUPTED; break; } @@ -97,7 +97,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) if (nameoff < sizeof(struct erofs_dirent) || nameoff >= PAGE_SIZE) { errln("%s, invalid de[0].nameoff %u @ nid %llu", - __func__, nameoff, EROFS_V(dir)->nid); + __func__, nameoff, EROFS_I(dir)->nid); err = -EFSCORRUPTED; goto skip_this; } diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 494b35e5830a..f6dfd0271261 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -11,7 +11,7 @@ /* no locking */ static int read_inode(struct inode *inode, void *data) { - struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode *vi = EROFS_I(inode); struct erofs_inode_compact *dic = data; struct 
erofs_inode_extended *die; @@ -140,7 +140,7 @@ bogusimode: static int fill_inline_data(struct inode *inode, void *data, unsigned int m_pofs) { - struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode *vi = EROFS_I(inode); struct erofs_sb_info *sbi = EROFS_I_SB(inode); /* should be tail-packing data inline */ @@ -178,7 +178,7 @@ static int fill_inline_data(struct inode *inode, void *data, static int fill_inode(struct inode *inode, int isdir) { struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode *vi = EROFS_I(inode); struct page *page; void *data; int err; @@ -260,7 +260,7 @@ static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) { const erofs_nid_t nid = *(erofs_nid_t *)opaque; - return EROFS_V(inode)->nid == nid; + return EROFS_I(inode)->nid == nid; } static int erofs_iget_set_actor(struct inode *inode, void *opaque) @@ -297,7 +297,7 @@ struct inode *erofs_iget(struct super_block *sb, if (inode->i_state & I_NEW) { int err; - struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode *vi = EROFS_I(inode); vi->nid = nid; @@ -317,7 +317,7 @@ int erofs_getattr(const struct path *path, struct kstat *stat, { struct inode *const inode = d_inode(path->dentry); - if (erofs_inode_is_data_compressed(EROFS_V(inode)->datalayout)) + if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) stat->attributes |= STATX_ATTR_COMPRESSED; stat->attributes |= STATX_ATTR_IMMUTABLE; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 0f5cbf0a7570..10497ee07cae 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -272,14 +272,14 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) } /* atomic flag definitions */ -#define EROFS_V_EA_INITED_BIT 0 -#define EROFS_V_Z_INITED_BIT 1 +#define EROFS_I_EA_INITED_BIT 0 +#define EROFS_I_Z_INITED_BIT 1 /* bitlock definitions (arranged in reverse order) */ -#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) -#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2) +#define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1) +#define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2) -struct erofs_vnode { +struct erofs_inode { erofs_nid_t nid; /* atomic flags (including bitlocks) */ @@ -307,8 +307,8 @@ struct erofs_vnode { struct inode vfs_inode; }; -#define EROFS_V(ptr) \ - container_of(ptr, struct erofs_vnode, vfs_inode) +#define EROFS_I(ptr) \ + container_of(ptr, struct erofs_inode, vfs_inode) static inline unsigned long inode_datablocks(struct inode *inode) { diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index c1068ad0535e..a6b6a4ab1403 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -117,7 +117,7 @@ static struct page *find_target_block_classic(struct inode *dir, kunmap_atomic(de); put_page(page); errln("corrupted dir block %d @ nid %llu", - mid, EROFS_V(dir)->nid); + mid, EROFS_I(dir)->nid); DBG_BUGON(1); page = ERR_PTR(-EFSCORRUPTED); goto out; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 499dc7f5d0e6..3986be582dbb 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -18,27 +18,27 @@ static struct kmem_cache *erofs_inode_cachep __read_mostly; static void init_once(void *ptr) { - struct erofs_vnode *vi = ptr; + struct erofs_inode *vi = ptr; inode_init_once(&vi->vfs_inode); } static struct inode *alloc_inode(struct super_block *sb) { - struct erofs_vnode *vi = + struct erofs_inode *vi = kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); if (!vi) return NULL; /* zero out everything except vfs_inode */ - memset(vi, 0, offsetof(struct erofs_vnode, 
+	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
 	return &vi->vfs_inode;
 }
 
 static void free_inode(struct inode *inode)
 {
-	struct erofs_vnode *vi = EROFS_V(inode);
+	struct erofs_inode *vi = EROFS_I(inode);
 
 	/* be careful RCU symlink path (see ext4_inode_info->i_data)! */
 	if (is_inode_fast_symlink(inode))
@@ -517,7 +517,7 @@ static int __init erofs_module_init(void)
 	infoln("initializing erofs " EROFS_VERSION);
 
 	erofs_inode_cachep = kmem_cache_create("erofs_inode",
-					       sizeof(struct erofs_vnode), 0,
+					       sizeof(struct erofs_inode), 0,
 					       SLAB_RECLAIM_ACCOUNT,
 					       init_once);
 	if (!erofs_inode_cachep) {
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 620cbc15f4d0..d5b7fe0bee45 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -38,7 +38,7 @@ static inline void xattr_iter_end_final(struct xattr_iter *it)
 
 static int init_inode_xattrs(struct inode *inode)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct xattr_iter it;
 	unsigned int i;
 	struct erofs_xattr_ibody_header *ih;
@@ -48,14 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
 	int ret = 0;
 
 	/* the most case is that xattrs of this inode are initialized. */
-	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
 		return 0;
 
-	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
 
 	/* someone has initialized xattrs for us? */
-	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
 		goto out_unlock;
 
 	/*
@@ -136,10 +136,10 @@ static int init_inode_xattrs(struct inode *inode)
 	}
 	xattr_iter_end(&it, atomic_map);
 
-	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
+	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
 
 out_unlock:
-	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
+	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
 	return ret;
 }
 
@@ -184,7 +184,7 @@ static inline int xattr_iter_fixup(struct xattr_iter *it)
 static int inline_xattr_iter_begin(struct xattr_iter *it,
 				   struct inode *inode)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
 	unsigned int xattr_header_sz, inline_xattr_ofs;
 
@@ -385,7 +385,7 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
 
 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct super_block *const sb = inode->i_sb;
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	unsigned int i;
@@ -608,7 +608,7 @@ static int inline_listxattr(struct listxattr_iter *it)
 static int shared_listxattr(struct listxattr_iter *it)
 {
 	struct inode *const inode = d_inode(it->dentry);
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct super_block *const sb = inode->i_sb;
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	unsigned int i;
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index c5ca47d814dd..3585b84d2f20 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -16,8 +16,8 @@
 
 static inline unsigned int inlinexattr_header_size(struct inode *inode)
 {
-	return sizeof(struct erofs_xattr_ibody_header)
-		+ sizeof(u32) * EROFS_V(inode)->xattr_shared_count;
+	return sizeof(struct erofs_xattr_ibody_header) +
+		sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
 }
 
 static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 653bde0a619a..f06a2fad7af2 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -421,7 +421,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
 	else
 		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
 
-	pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0];
+	pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
 	pcl->clusterbits -= PAGE_SHIFT;
 
 	/* new pclusters should be claimed as type 1, primary and followed */
@@ -1404,12 +1404,9 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 		head = (void *)page_private(page);
 
 		err = z_erofs_do_read_page(&f, page, &pagepool);
-		if (err) {
-			struct erofs_vnode *vi = EROFS_V(inode);
-
+		if (err)
 			errln("%s, readahead error at page %lu of nid %llu",
-			      __func__, page->index, vi->nid);
-		}
+			      __func__, page->index, EROFS_I(inode)->nid);
 
 		put_page(page);
 	}
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 6a06fb80ef3f..30a5171637d7 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -10,7 +10,7 @@
 
 int z_erofs_fill_inode(struct inode *inode)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 
 	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
 		vi->z_advise = 0;
@@ -19,7 +19,7 @@ int z_erofs_fill_inode(struct inode *inode)
 		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
 		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
 		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
-		set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
+		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
 	}
 
 	inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
@@ -28,7 +28,7 @@ int z_erofs_fill_inode(struct inode *inode)
 
 static int fill_inode_lazy(struct inode *inode)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct super_block *const sb = inode->i_sb;
 	int err;
 	erofs_off_t pos;
@@ -36,14 +36,14 @@ static int fill_inode_lazy(struct inode *inode)
 	void *kaddr;
 	struct z_erofs_map_header *h;
 
-	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
 		return 0;
 
-	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
+	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
 
 	err = 0;
-	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
 		goto out_unlock;
 
 	DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
@@ -83,13 +83,13 @@ static int fill_inode_lazy(struct inode *inode)
 	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
 					((h->h_clusterbits >> 5) & 7);
 
-	set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
+	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
 unmap_done:
 	kunmap_atomic(kaddr);
 	unlock_page(page);
 	put_page(page);
 out_unlock:
-	clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
+	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
 	return err;
 }
 
@@ -142,7 +142,7 @@ static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 					     unsigned long lcn)
 {
 	struct inode *const inode = m->inode;
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
 	const erofs_off_t pos =
 		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
@@ -196,7 +196,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 				  unsigned int amortizedshift,
 				  unsigned int eofs)
 {
-	struct erofs_vnode *const vi = EROFS_V(m->inode);
+	struct erofs_inode *const vi = EROFS_I(m->inode);
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
 	const unsigned int lomask = (1 << lclusterbits) - 1;
 	unsigned int vcnt, base, lo, encodebits, nblk;
@@ -260,7 +260,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 					    unsigned long lcn)
 {
 	struct inode *const inode = m->inode;
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
 	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
 					vi->inode_isize + vi->xattr_isize, 8) +
@@ -314,7 +314,7 @@ out:
 static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 				      unsigned int lcn)
 {
-	const unsigned int datamode = EROFS_V(m->inode)->datalayout;
+	const unsigned int datamode = EROFS_I(m->inode)->datalayout;
 
 	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
 		return vle_legacy_load_cluster_from_disk(m, lcn);
@@ -328,7 +328,7 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 static int vle_extent_lookback(struct z_erofs_maprecorder *m,
 			       unsigned int lookback_distance)
 {
-	struct erofs_vnode *const vi = EROFS_V(m->inode);
+	struct erofs_inode *const vi = EROFS_I(m->inode);
 	struct erofs_map_blocks *const map = m->map;
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
 	unsigned long lcn = m->lcn;
@@ -374,7 +374,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 			    struct erofs_map_blocks *map,
 			    int flags)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
+	struct erofs_inode *const vi = EROFS_I(inode);
 	struct z_erofs_maprecorder m = {
 		.inode = inode,
 		.map = map,
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index d239f39cbc8c..27f5caa6299a 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -41,7 +41,7 @@ TRACE_EVENT(erofs_lookup,
 	TP_fast_assign(
 		__entry->dev	= dir->i_sb->s_dev;
-		__entry->nid	= EROFS_V(dir)->nid;
+		__entry->nid	= EROFS_I(dir)->nid;
 		__entry->name	= dentry->d_name.name;
 		__entry->flags	= flags;
 	),
@@ -66,7 +66,7 @@ TRACE_EVENT(erofs_fill_inode,
 	TP_fast_assign(
 		__entry->dev		= inode->i_sb->s_dev;
-		__entry->nid		= EROFS_V(inode)->nid;
+		__entry->nid		= EROFS_I(inode)->nid;
 		__entry->blkaddr	= erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
 		__entry->ofs		= erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
 		__entry->isdir		= isdir;
@@ -95,7 +95,7 @@ TRACE_EVENT(erofs_readpage,
 	TP_fast_assign(
 		__entry->dev	= page->mapping->host->i_sb->s_dev;
-		__entry->nid	= EROFS_V(page->mapping->host)->nid;
+		__entry->nid	= EROFS_I(page->mapping->host)->nid;
 		__entry->dir	= S_ISDIR(page->mapping->host->i_mode);
 		__entry->index	= page->index;
 		__entry->uptodate = PageUptodate(page);
@@ -128,7 +128,7 @@ TRACE_EVENT(erofs_readpages,
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
-		__entry->nid	= EROFS_V(inode)->nid;
+		__entry->nid	= EROFS_I(inode)->nid;
 		__entry->start	= page->index;
 		__entry->nrpage	= nrpage;
 		__entry->raw	= raw;
@@ -157,7 +157,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
-		__entry->nid	= EROFS_V(inode)->nid;
+		__entry->nid	= EROFS_I(inode)->nid;
 		__entry->la	= map->m_la;
 		__entry->llen	= map->m_llen;
 		__entry->flags	= flags;
@@ -203,7 +203,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
-		__entry->nid	= EROFS_V(inode)->nid;
+		__entry->nid	= EROFS_I(inode)->nid;
 		__entry->flags	= flags;
 		__entry->la	= map->m_la;
 		__entry->pa	= map->m_pa;
@@ -247,7 +247,7 @@ TRACE_EVENT(erofs_destroy_inode,
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
-		__entry->nid	= EROFS_V(inode)->nid;
+		__entry->nid	= EROFS_I(inode)->nid;
 	),
 
 	TP_printk("dev = (%d,%d), nid = %llu",
 		  show_dev_nid(__entry))
-- cgit v1.2.3-71-gd317
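
A note on the EROFS_I() helper renamed above: it is the standard
container_of() idiom, recovering the filesystem's private inode from the
embedded VFS "struct inode" by subtracting the member offset. This is also
why alloc_inode() can zero only the bytes up to offsetof(..., vfs_inode):
the embedded VFS inode is initialized once via inode_init_once(). Below is
a minimal userspace sketch of the same pattern; the names my_inode and
MY_I() are hypothetical stand-ins, not the kernel definitions.

	#include <stddef.h>
	#include <stdio.h>

	/* userspace stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inode {			/* stand-in for the VFS inode */
		unsigned long i_ino;
	};

	struct my_inode {		/* plays the role of struct erofs_inode */
		unsigned long long nid;	/* fs-private fields first ... */
		struct inode vfs_inode;	/* ... embedded VFS inode last */
	};

	/* same shape as the EROFS_I() macro introduced by this patch */
	#define MY_I(ptr)	container_of(ptr, struct my_inode, vfs_inode)

	int main(void)
	{
		struct my_inode mi = { .nid = 42 };
		struct inode *vfs = &mi.vfs_inode;	/* what the VFS hands back */

		/* recover the containing fs-private inode from the VFS inode */
		printf("nid = %llu\n", MY_I(vfs)->nid);
		return 0;
	}

The init_inode_xattrs() and fill_inode_lazy() hunks also show the bitlock
idiom the renamed flag bits serve: test_bit() as a lock-free fast path,
wait_on_bit_lock() to serialize initializers, a re-check of the INITED bit
in case another task won the race, and clear_and_wake_up_bit() to release
any waiters.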