Diffstat (limited to 'include')
-rw-r--r--   include/linux/percpu_ida.h                 60
-rw-r--r--   include/scsi/scsi.h                          1
-rw-r--r--   include/target/iscsi/iscsi_transport.h       8
-rw-r--r--   include/target/target_core_backend.h         7
-rw-r--r--   include/target/target_core_base.h           26
-rw-r--r--   include/target/target_core_fabric.h         30
6 files changed, 128 insertions, 4 deletions
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
new file mode 100644
index 000000000000..0b23edbee309
--- /dev/null
+++ b/include/linux/percpu_ida.h
@@ -0,0 +1,60 @@
+#ifndef __PERCPU_IDA_H__
+#define __PERCPU_IDA_H__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+struct percpu_ida_cpu;
+
+struct percpu_ida {
+	/*
+	 * number of tags available to be allocated, as passed to
+	 * percpu_ida_init()
+	 */
+	unsigned			nr_tags;
+
+	struct percpu_ida_cpu __percpu	*tag_cpu;
+
+	/*
+	 * Bitmap of cpus that (may) have tags on their percpu freelists:
+	 * steal_tags() uses this to decide when to steal tags, and which cpus
+	 * to try stealing from.
+	 *
+	 * It's ok for a freelist to be empty when its bit is set - steal_tags()
+	 * will just keep looking - but the bitmap _must_ be set whenever a
+	 * percpu freelist does have tags.
+	 */
+	cpumask_t			cpus_have_tags;
+
+	struct {
+		spinlock_t		lock;
+		/*
+		 * When we go to steal tags from another cpu (see steal_tags()),
+		 * we want to pick a cpu at random. Cycling through them every
+		 * time we steal is a bit easier and more or less equivalent:
+		 */
+		unsigned		cpu_last_stolen;
+
+		/* For sleeping on allocation failure */
+		wait_queue_head_t	wait;
+
+		/*
+		 * Global freelist - it's a stack where nr_free points to the
+		 * top
+		 */
+		unsigned		nr_free;
+		unsigned		*freelist;
+	} ____cacheline_aligned_in_smp;
+};
+
+int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
+
+void percpu_ida_destroy(struct percpu_ida *pool);
+int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
+
+#endif /* __PERCPU_IDA_H__ */
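
The API added above is intentionally small: percpu_ida_init() sizes the pool, percpu_ida_alloc() hands back a free tag in [0, nr_tags) (and may sleep waiting for one if the gfp mask allows), and percpu_ida_free() returns it. A minimal usage sketch, not part of this diff and with hypothetical names:

#include <linux/gfp.h>
#include <linux/percpu_ida.h>

static struct percpu_ida example_pool;	/* hypothetical pool */

static int example_setup(unsigned long nr_tags)
{
	/* Tags handed out later will fall in [0, nr_tags). */
	return percpu_ida_init(&example_pool, nr_tags);
}

static int example_do_io(void)
{
	int tag;

	/* May sleep on the pool's waitqueue until a tag is returned. */
	tag = percpu_ida_alloc(&example_pool, GFP_KERNEL);
	if (tag < 0)
		return tag;

	/* ... use tag as an index into preallocated per-command state ... */

	percpu_ida_free(&example_pool, tag);
	return 0;
}

static void example_teardown(void)
{
	percpu_ida_destroy(&example_pool);
}
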
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index d477bfb73fb9..66d42edfb3fc 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -144,6 +144,7 @@ enum scsi_timeouts {
#define ACCESS_CONTROL_IN 0x86
#define ACCESS_CONTROL_OUT 0x87
#define READ_16 0x88
+#define COMPARE_AND_WRITE 0x89
#define WRITE_16 0x8a
#define READ_ATTRIBUTE 0x8c
#define WRITE_ATTRIBUTE 0x8d
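
For reference, the SBC-3 COMPARE AND WRITE CDB named by this opcode carries a 64-bit LBA in bytes 2-9 and the number of logical blocks in byte 13. A hedged sketch of decoding it (the helper name is hypothetical and not part of this diff):

#include <asm/unaligned.h>
#include <scsi/scsi.h>

/* Hypothetical helper: pull LBA and block count out of a CAW CDB. */
static int example_parse_compare_and_write(const unsigned char *cdb,
					   unsigned long long *lba,
					   unsigned int *nolb)
{
	if (cdb[0] != COMPARE_AND_WRITE)
		return -1;

	*lba  = get_unaligned_be64(&cdb[2]);	/* bytes 2-9: 64-bit LBA */
	*nolb = cdb[13];			/* byte 13: number of LBs */
	return 0;
}
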
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e5d09d242ba3..a12589c4ee92 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,13 +6,13 @@ struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
char name[ISCSIT_TRANSPORT_NAME];
int transport_type;
+ int priv_size;
struct module *owner;
struct list_head t_node;
int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
void (*iscsit_free_conn)(struct iscsi_conn *);
- struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t);
int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
@@ -22,6 +22,11 @@ struct iscsit_transport {
int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
};
+static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
+{
+	return (void *)(cmd + 1);
+}
+
/*
* From iscsi_target_transport.c
*/
@@ -92,3 +97,4 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *, __be32);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
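
Taken together, the new priv_size field and the iscsit_priv_cmd() helper let a transport co-allocate its per-command state directly behind struct iscsi_cmd instead of supplying its own iscsit_alloc_cmd() callback. A hedged sketch of how a transport driver might use them; names are hypothetical and not taken from this diff:

#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

/* Hypothetical per-command private state for this sketch. */
struct example_priv_cmd {
	int	in_flight;
};

/* Assumes struct iscsi_cmd / struct iscsit_transport are visible here. */
static struct iscsit_transport example_transport = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.priv_size	= sizeof(struct example_priv_cmd),
	/* ... np setup/accept and queue callbacks omitted ... */
};

static void example_handle_cmd(struct iscsi_cmd *cmd)
{
	/* The private area is co-allocated right after struct iscsi_cmd. */
	struct example_priv_cmd *priv = iscsit_priv_cmd(cmd);

	priv->in_flight = 1;
}
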
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index ffa2696d64dc..5ebe21cd5d1c 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -39,7 +39,8 @@ struct se_subsystem_api {
};
struct sbc_ops {
- sense_reason_t (*execute_rw)(struct se_cmd *cmd);
+ sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
+ u32, enum dma_data_direction);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
@@ -73,6 +74,10 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
+/* core helpers also used by xcopy during internal command setup */
+int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
+sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
+ struct scatterlist *, u32, struct scatterlist *, u32);
void array_free(void *array, int n);
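
The widened execute_rw signature passes the scatterlist, entry count and DMA direction explicitly, which presumably lets callers such as COMPARE AND WRITE emulation submit a buffer other than cmd->t_data_sg. A hedged sketch of a backend wiring it up (names and body are hypothetical):

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

/* Hypothetical backend: submit sgl/nents in the given direction. */
static sense_reason_t example_execute_rw(struct se_cmd *cmd,
					 struct scatterlist *sgl, u32 nents,
					 enum dma_data_direction data_direction)
{
	/* ... issue I/O at cmd->t_task_lba against the backing store ... */
	return TCM_NO_SENSE;
}

static struct sbc_ops example_sbc_ops = {
	.execute_rw		= example_execute_rw,
	/* .execute_sync_cache, .execute_write_same, ... */
};
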
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e34fc904f2e1..5bdb8b7d2a69 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -5,11 +5,12 @@
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/percpu_ida.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
-#define TARGET_CORE_MOD_VERSION "v4.1.0-rc2-ml"
+#define TARGET_CORE_MOD_VERSION "v4.1.0"
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
/* Maximum Number of LUNs per Target Portal Group */
@@ -96,6 +97,10 @@
* block/blk-lib.c:blkdev_issue_discard()
*/
#define DA_EMULATE_TPWS 0
+/* Emulation for CompareAndWrite (AtomicTestandSet) by default */
+#define DA_EMULATE_CAW 1
+/* Emulation for 3rd Party Copy (ExtendedCopy) by default */
+#define DA_EMULATE_3PC 1
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
@@ -158,6 +163,9 @@ enum se_cmd_flags_table {
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_ACK_KREF = 0x00040000,
+ SCF_COMPARE_AND_WRITE = 0x00080000,
+ SCF_COMPARE_AND_WRITE_POST = 0x00100000,
+ SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -196,6 +204,7 @@ enum tcm_sense_reason_table {
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13),
+ TCM_MISCOMPARE_VERIFY = R(0x14),
#undef R
};
@@ -415,6 +424,8 @@ struct se_cmd {
enum dma_data_direction data_direction;
/* For SAM Task Attribute */
int sam_task_attr;
+ /* Used for se_sess->sess_tag_pool */
+ unsigned int map_tag;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
unsigned cmd_wait_set:1;
@@ -444,11 +455,14 @@ struct se_cmd {
struct kref cmd_kref;
struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
- void (*transport_complete_callback)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
+ unsigned int t_task_nolb;
unsigned int transport_state;
#define CMD_T_ABORTED (1 << 0)
#define CMD_T_ACTIVE (1 << 1)
@@ -469,7 +483,9 @@ struct se_cmd {
struct work_struct work;
struct scatterlist *t_data_sg;
+ struct scatterlist *t_data_sg_orig;
unsigned int t_data_nents;
+ unsigned int t_data_nents_orig;
void *t_data_vmap;
struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents;
@@ -536,6 +552,8 @@ struct se_session {
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
struct kref sess_kref;
+ void *sess_cmd_map;
+ struct percpu_ida sess_tag_pool;
};
struct se_device;
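
The new sess_cmd_map/sess_tag_pool pair, combined with transport_init_session_tags() declared further down in target_core_fabric.h, lets a fabric driver preallocate its per-command descriptors and hand them out by percpu_ida tag. A hedged sketch of that pattern, with hypothetical names and error handling omitted:

#include <linux/gfp.h>
#include <linux/percpu_ida.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric per-command descriptor stored in sess_cmd_map. */
struct example_cmd {
	struct se_cmd	se_cmd;
	/* ... fabric-private state ... */
};

static struct se_session *example_make_session(void)
{
	/* Preallocate 128 descriptors of sizeof(struct example_cmd) each. */
	return transport_init_session_tags(128, sizeof(struct example_cmd));
}

static struct example_cmd *example_get_cmd(struct se_session *se_sess)
{
	struct example_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
	if (tag < 0)
		return NULL;

	cmd = &((struct example_cmd *)se_sess->sess_cmd_map)[tag];
	cmd->se_cmd.map_tag = tag;
	return cmd;
}

static void example_put_cmd(struct se_session *se_sess, struct example_cmd *cmd)
{
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
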
@@ -589,6 +607,8 @@ struct se_dev_attrib {
int emulate_tas;
int emulate_tpu;
int emulate_tpws;
+ int emulate_caw;
+ int emulate_3pc;
int enforce_pr_isids;
int is_nonrot;
int emulate_rest_reord;
@@ -656,6 +676,7 @@ struct se_device {
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
+ struct semaphore caw_sem;
/* Used for legacy SPC-2 reservations */
struct se_node_acl *dev_reserved_node_acl;
/* Used for ALUA Logical Unit Group membership */
@@ -669,6 +690,7 @@ struct se_device {
struct list_head delayed_cmd_list;
struct list_head state_list;
struct list_head qf_cmd_list;
+ struct list_head g_dev_node;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
/* T10 Inquiry and VPD WWN Information */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 7a16178424f9..882b650e32be 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -84,6 +84,9 @@ struct target_core_fabric_ops {
};
struct se_session *transport_init_session(void);
+int transport_alloc_session_tags(struct se_session *, unsigned int,
+ unsigned int);
+struct se_session *transport_init_session_tags(unsigned int, unsigned int);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
@@ -131,6 +134,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
+void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u32);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
@@ -175,4 +179,30 @@ u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *
char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
u32 *, char **);
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ). However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static inline enum dma_data_direction
+target_reverse_dma_direction(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_BIDI)
+		return DMA_BIDIRECTIONAL;
+
+	switch (se_cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return DMA_FROM_DEVICE;
+	case DMA_FROM_DEVICE:
+		return DMA_TO_DEVICE;
+	case DMA_NONE:
+	default:
+		return DMA_NONE;
+	}
+}
+
#endif /* TARGET_CORE_FABRICH */
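
A hedged sketch of how a fabric driver might use target_reverse_dma_direction() when mapping a command's data scatterlist for its HBA; device handling and names are hypothetical:

#include <linux/dma-mapping.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical: map se_cmd's data SGL for a DMA-capable HBA. */
static int example_map_data(struct device *dev, struct se_cmd *se_cmd)
{
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	int count;

	if (dir == DMA_NONE)
		return 0;

	count = dma_map_sg(dev, se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	if (!count)
		return -ENOMEM;

	return count;
}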