Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/agp/amd64-agp.c              2
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c          5
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c             6
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c     118
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c         8
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c         22
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c            33
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c        28
-rw-r--r--  drivers/char/random.c                  1409
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c              17
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c           2
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c            1
-rw-r--r--  drivers/char/tpm/tpm_tis.c               67
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h          58
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c      11
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.h            4
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_cr50.c       7
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c      45
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c     98
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c          18
20 files changed, 902 insertions, 1057 deletions
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index dc78a4fb879e..84a4aa9312cf 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -327,7 +327,7 @@ static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) { int i; - if (amd_cache_northbridges() < 0) + if (!amd_nb_num()) return -ENODEV; if (!amd_nb_has_feature(AMD_NB_GART)) diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 49b8f22fdcf0..db40037eb347 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -299,8 +299,7 @@ static int ipmb_slave_cb(struct i2c_client *client, return 0; } -static int ipmb_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ipmb_probe(struct i2c_client *client) { struct ipmb_dev *ipmb_dev; int ret; @@ -369,7 +368,7 @@ static struct i2c_driver ipmb_driver = { .name = "ipmb-dev", .acpi_match_table = ACPI_PTR(acpi_ipmb_id), }, - .probe = ipmb_probe, + .probe_new = ipmb_probe, .remove = ipmb_remove, .id_table = ipmb_id, }; diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c index b81b862532fb..ab19b4b3317e 100644 --- a/drivers/char/ipmi/ipmi_ipmb.c +++ b/drivers/char/ipmi/ipmi_ipmb.c @@ -442,8 +442,7 @@ static int ipmi_ipmb_remove(struct i2c_client *client) return 0; } -static int ipmi_ipmb_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ipmi_ipmb_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ipmi_ipmb_dev *iidev; @@ -476,6 +475,7 @@ static int ipmi_ipmb_probe(struct i2c_client *client, slave_np = of_parse_phandle(dev->of_node, "slave-dev", 0); if (slave_np) { slave_adap = of_get_i2c_adapter_by_node(slave_np); + of_node_put(slave_np); if (!slave_adap) { dev_notice(&client->dev, "Could not find slave adapter\n"); @@ -570,7 +570,7 @@ static struct i2c_driver ipmi_ipmb_driver = { .name = DEVICE_NAME, .of_match_table = of_ipmi_ipmb_match, }, - .probe = ipmi_ipmb_probe, + .probe_new = ipmi_ipmb_probe, .remove = ipmi_ipmb_remove, .id_table = ipmi_ipmb_id, }; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c59265146e9c..703433493c85 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -11,8 +11,8 @@ * Copyright 2002 MontaVista Software Inc. */ -#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: " -#define dev_fmt pr_fmt +#define pr_fmt(fmt) "IPMI message handler: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) #include <linux/module.h> #include <linux/errno.h> @@ -145,6 +145,18 @@ module_param(default_max_retries, uint, 0644); MODULE_PARM_DESC(default_max_retries, "The time (milliseconds) between retry sends in maintenance mode"); +/* The default maximum number of users that may register. */ +static unsigned int max_users = 30; +module_param(max_users, uint, 0644); +MODULE_PARM_DESC(max_users, + "The most users that may use the IPMI stack at one time."); + +/* The default maximum number of message a user may have outstanding. */ +static unsigned int max_msgs_per_user = 100; +module_param(max_msgs_per_user, uint, 0644); +MODULE_PARM_DESC(max_msgs_per_user, + "The most message a user may have outstanding."); + /* Call every ~1000 ms. */ #define IPMI_TIMEOUT_TIME 1000 @@ -187,6 +199,8 @@ struct ipmi_user { /* Does this interface receive IPMI events? */ bool gets_events; + atomic_t nr_msgs; + /* Free must run in process context for RCU cleanup. 
*/ struct work_struct remove_work; }; @@ -442,6 +456,10 @@ struct ipmi_smi { */ struct list_head users; struct srcu_struct users_srcu; + atomic_t nr_users; + struct device_attribute nr_users_devattr; + struct device_attribute nr_msgs_devattr; + /* Used for wake ups at startup. */ wait_queue_head_t waitq; @@ -927,11 +945,13 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); + atomic_dec(&msg->user->nr_msgs); } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); if (user) { + atomic_dec(&user->nr_msgs); user->handler->ipmi_recv_hndl(msg, user->handler_data); release_ipmi_user(user, index); } else { @@ -1230,6 +1250,11 @@ int ipmi_create_user(unsigned int if_num, goto out_kfree; found: + if (atomic_add_return(1, &intf->nr_users) > max_users) { + rv = -EBUSY; + goto out_kfree; + } + INIT_WORK(&new_user->remove_work, free_user_work); rv = init_srcu_struct(&new_user->release_barrier); @@ -1244,6 +1269,7 @@ int ipmi_create_user(unsigned int if_num, /* Note that each existing user holds a refcount to the interface. */ kref_get(&intf->refcount); + atomic_set(&new_user->nr_msgs, 0); kref_init(&new_user->refcount); new_user->handler = handler; new_user->handler_data = handler_data; @@ -1262,6 +1288,7 @@ int ipmi_create_user(unsigned int if_num, return 0; out_kfree: + atomic_dec(&intf->nr_users); srcu_read_unlock(&ipmi_interfaces_srcu, index); vfree(new_user); return rv; @@ -1336,6 +1363,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) /* Remove the user from the interface's sequence table. */ spin_lock_irqsave(&intf->seq_lock, flags); list_del_rcu(&user->link); + atomic_dec(&intf->nr_users); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse @@ -2284,6 +2312,14 @@ static int i_ipmi_request(struct ipmi_user *user, struct ipmi_recv_msg *recv_msg; int rv = 0; + if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + /* Decrement will happen at the end of the routine. 
*/ + rv = -EBUSY; + goto out; + } + } + if (supplied_recv) recv_msg = supplied_recv; else { @@ -2296,7 +2332,7 @@ static int i_ipmi_request(struct ipmi_user *user, recv_msg->user_msg_data = user_msg_data; if (supplied_smi) - smi_msg = (struct ipmi_smi_msg *) supplied_smi; + smi_msg = supplied_smi; else { smi_msg = ipmi_alloc_smi_msg(); if (smi_msg == NULL) { @@ -2348,13 +2384,16 @@ out_err: ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); } else { - pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); + dev_dbg(intf->si_dev, "Send: %*ph\n", + smi_msg->data_size, smi_msg->data); smi_send(intf, intf->handlers, smi_msg, priority); } rcu_read_unlock(); out: + if (rv && user) + atomic_dec(&user->nr_msgs); return rv; } @@ -3471,6 +3510,36 @@ void ipmi_poll_interface(struct ipmi_user *user) } EXPORT_SYMBOL(ipmi_poll_interface); +static ssize_t nr_users_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipmi_smi *intf = container_of(attr, + struct ipmi_smi, nr_users_devattr); + + return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); +} +static DEVICE_ATTR_RO(nr_users); + +static ssize_t nr_msgs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipmi_smi *intf = container_of(attr, + struct ipmi_smi, nr_msgs_devattr); + struct ipmi_user *user; + int index; + unsigned int count = 0; + + index = srcu_read_lock(&intf->users_srcu); + list_for_each_entry_rcu(user, &intf->users, link) + count += atomic_read(&user->nr_msgs); + srcu_read_unlock(&intf->users_srcu, index); + + return sysfs_emit(buf, "%u\n", count); +} +static DEVICE_ATTR_RO(nr_msgs); + static void redo_bmc_reg(struct work_struct *work) { struct ipmi_smi *intf = container_of(work, struct ipmi_smi, @@ -3529,6 +3598,7 @@ int ipmi_add_smi(struct module *owner, if (slave_addr != 0) intf->addrinfo[0].address = slave_addr; INIT_LIST_HEAD(&intf->users); + atomic_set(&intf->nr_users, 0); intf->handlers = handlers; intf->send_info = send_info; spin_lock_init(&intf->seq_lock); @@ -3592,6 +3662,20 @@ int ipmi_add_smi(struct module *owner, if (rv) goto out_err_bmc_reg; + intf->nr_users_devattr = dev_attr_nr_users; + sysfs_attr_init(&intf->nr_users_devattr.attr); + rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); + if (rv) + goto out_err_bmc_reg; + + intf->nr_msgs_devattr = dev_attr_nr_msgs; + sysfs_attr_init(&intf->nr_msgs_devattr.attr); + rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); + if (rv) { + device_remove_file(intf->si_dev, &intf->nr_users_devattr); + goto out_err_bmc_reg; + } + /* * Keep memory order straight for RCU readers. Make * sure everything else is committed to memory before @@ -3677,8 +3761,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) void ipmi_unregister_smi(struct ipmi_smi *intf) { struct ipmi_smi_watcher *w; - int intf_num = intf->intf_num, index; + int intf_num, index; + if (!intf) + return; + intf_num = intf->intf_num; mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; intf->in_shutdown = true; @@ -3688,6 +3775,9 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) /* At this point no users can be added to the interface. */ + device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); + device_remove_file(intf->si_dev, &intf->nr_users_devattr); + /* * Call all the watcher interfaces to tell them that * an interface is going away. 
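[Editor's note: the nr_users and nr_msgs counters added above are exported as read-only sysfs attributes on the interface's device. A rough userspace sketch of reading them follows; the /sys/devices/platform/ipmi_si.0 path is only an example, since the real location depends on how the interface device was probed (platform, PCI, I2C, ...).]

#include <stdio.h>

int main(void)
{
	/* Example path only; adjust for the actual interface device. */
	static const char *attrs[] = {
		"/sys/devices/platform/ipmi_si.0/nr_users",
		"/sys/devices/platform/ipmi_si.0/nr_msgs",
	};
	unsigned int val;
	FILE *f;

	for (int i = 0; i < 2; i++) {
		f = fopen(attrs[i], "r");
		if (!f)
			return 1;
		if (fscanf(f, "%u", &val) == 1)
			printf("%s: %u\n", attrs[i], val);
		fclose(f);
	}
	return 0;
}

[Each file simply reports the atomic counters the message handler maintains, so they can be polled to watch for users or messages leaking.]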
@@ -3836,7 +3926,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, msg->data[10] = ipmb_checksum(&msg->data[6], 4); msg->data_size = 11; - pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); + dev_dbg(intf->si_dev, "Invalid command: %*ph\n", + msg->data_size, msg->data); rcu_read_lock(); if (!intf->in_shutdown) { @@ -3989,10 +4080,10 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_ipmb_direct_addr *daddr; - recv_msg = (struct ipmi_recv_msg *) msg->user_data; + recv_msg = msg->user_data; if (recv_msg == NULL) { dev_warn(intf->si_dev, - "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); + "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); return 0; } @@ -4407,10 +4498,10 @@ static int handle_bmc_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_system_interface_addr *smi_addr; - recv_msg = (struct ipmi_recv_msg *) msg->user_data; + recv_msg = msg->user_data; if (recv_msg == NULL) { dev_warn(intf->si_dev, - "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); + "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); return 0; } @@ -4444,7 +4535,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, unsigned char cc; bool is_cmd = !((msg->rsp[0] >> 2) & 1); - pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); + dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp); if (msg->rsp_size < 2) { /* Message is too small to be correct. */ @@ -4518,6 +4609,8 @@ return_unspecified: } else /* The message was sent, start the timer. 
*/ intf_start_seq_timer(intf, msg->msgid); + requeue = 0; + goto out; } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) || (msg->rsp[1] != msg->data[1])) { /* @@ -4826,7 +4919,8 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, smi_msg->data_size = recv_msg->msg.data_len; smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); - pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); + dev_dbg(intf->si_dev, "Resend: %*ph\n", + smi_msg->data_size, smi_msg->data); return smi_msg; } diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index bc3a18daf97a..163ec9749e55 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -94,12 +94,8 @@ static void dummy_recv_free(struct ipmi_recv_msg *msg) { atomic_dec(&dummy_count); } -static struct ipmi_smi_msg halt_smi_msg = { - .done = dummy_smi_free -}; -static struct ipmi_recv_msg halt_recv_msg = { - .done = dummy_recv_free -}; +static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(dummy_smi_free); +static struct ipmi_recv_msg halt_recv_msg = INIT_IPMI_RECV_MSG(dummy_recv_free); /* diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 64dedb3ef8ec..6e357ad76f2e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -264,15 +264,16 @@ static void cleanup_one_si(struct smi_info *smi_info); static void cleanup_ipmi_si(void); #ifdef DEBUG_TIMING -void debug_timestamp(char *msg) +void debug_timestamp(struct smi_info *smi_info, char *msg) { struct timespec64 t; ktime_get_ts64(&t); - pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec); + dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", + msg, t.tv_sec, t.tv_nsec); } #else -#define debug_timestamp(x) +#define debug_timestamp(smi_info, x) #endif static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); @@ -318,7 +319,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) smi_info->curr_msg = smi_info->waiting_msg; smi_info->waiting_msg = NULL; - debug_timestamp("Start2"); + debug_timestamp(smi_info, "Start2"); err = atomic_notifier_call_chain(&xaction_notifier_list, 0, smi_info); if (err & NOTIFY_STOP_MASK) { @@ -538,7 +539,7 @@ static void handle_transaction_done(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; - debug_timestamp("Done"); + debug_timestamp(smi_info, "Done"); switch (smi_info->si_state) { case SI_NORMAL: if (!smi_info->curr_msg) @@ -901,7 +902,7 @@ static void sender(void *send_info, struct smi_info *smi_info = send_info; unsigned long flags; - debug_timestamp("Enqueue"); + debug_timestamp(smi_info, "Enqueue"); if (smi_info->run_to_completion) { /* @@ -1079,7 +1080,7 @@ static void smi_timeout(struct timer_list *t) long timeout; spin_lock_irqsave(&(smi_info->si_lock), flags); - debug_timestamp("Timer"); + debug_timestamp(smi_info, "Timer"); jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) @@ -1128,7 +1129,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data) smi_inc_stat(smi_info, interrupts); - debug_timestamp("Interrupt"); + debug_timestamp(smi_info, "Interrupt"); smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); @@ -2220,10 +2221,7 @@ static void cleanup_one_si(struct smi_info *smi_info) return; list_del(&smi_info->link); - - if (smi_info->intf) - ipmi_unregister_smi(smi_info->intf); - + ipmi_unregister_smi(smi_info->intf); kfree(smi_info); } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c 
index f199cc194844..fc742ee9c046 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -814,6 +814,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, break; case SSIF_GETTING_EVENTS: + if (!msg) { + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "No message set while getting events\n"); + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + } + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. */ msg->done(msg); @@ -838,6 +846,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, break; case SSIF_GETTING_MESSAGES: + if (!msg) { + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "No message set while getting messages\n"); + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + } + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. */ msg->done(msg); @@ -861,6 +877,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, deliver_recv_msg(ssif_info, msg); } break; + + default: + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "Invalid state in message done handling: %d\n", + ssif_info->ssif_state); + ipmi_ssif_unlock_cond(ssif_info, flags); } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); @@ -1053,7 +1076,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) static void sender(void *send_info, struct ipmi_smi_msg *msg) { - struct ssif_info *ssif_info = (struct ssif_info *) send_info; + struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; BUG_ON(ssif_info->waiting_msg); @@ -1090,7 +1113,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data) */ static void request_events(void *send_info) { - struct ssif_info *ssif_info = (struct ssif_info *) send_info; + struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; if (!ssif_info->has_event_buffer) @@ -1107,7 +1130,7 @@ static void request_events(void *send_info) */ static void ssif_set_need_watch(void *send_info, unsigned int watch_mask) { - struct ssif_info *ssif_info = (struct ssif_info *) send_info; + struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; long timeout = 0; @@ -1619,7 +1642,7 @@ static int ssif_check_and_remove(struct i2c_client *client, return 0; } -static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int ssif_probe(struct i2c_client *client) { unsigned char msg[3]; unsigned char *resp; @@ -2037,7 +2060,7 @@ static struct i2c_driver ssif_i2c_driver = { .driver = { .name = DEVICE_NAME }, - .probe = ssif_probe, + .probe_new = ssif_probe, .remove = ssif_remove, .alert = ssif_alert, .id_table = ssif_id, diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 0604abdd249a..5b4e677929ca 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -354,12 +354,8 @@ static void msg_free_recv(struct ipmi_recv_msg *msg) complete(&msg_wait); } } -static struct ipmi_smi_msg smi_msg = { - .done = msg_free_smi -}; -static struct ipmi_recv_msg recv_msg = { - .done = msg_free_recv -}; +static struct ipmi_smi_msg smi_msg = INIT_IPMI_SMI_MSG(msg_free_smi); +static struct ipmi_recv_msg recv_msg = INIT_IPMI_RECV_MSG(msg_free_recv); static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, @@ -475,12 +471,10 @@ static void panic_recv_free(struct 
ipmi_recv_msg *msg) atomic_dec(&panic_done_count); } -static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = { - .done = panic_smi_free -}; -static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = { - .done = panic_recv_free -}; +static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = + INIT_IPMI_SMI_MSG(panic_smi_free); +static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = + INIT_IPMI_RECV_MSG(panic_recv_free); static void panic_halt_ipmi_heartbeat(void) { @@ -516,12 +510,10 @@ static void panic_halt_ipmi_heartbeat(void) atomic_sub(2, &panic_done_count); } -static struct ipmi_smi_msg panic_halt_smi_msg = { - .done = panic_smi_free -}; -static struct ipmi_recv_msg panic_halt_recv_msg = { - .done = panic_recv_free -}; +static struct ipmi_smi_msg panic_halt_smi_msg = + INIT_IPMI_SMI_MSG(panic_smi_free); +static struct ipmi_recv_msg panic_halt_recv_msg = + INIT_IPMI_RECV_MSG(panic_recv_free); /* * Special call, doesn't claim any locks. This is only to be called diff --git a/drivers/char/random.c b/drivers/char/random.c index 1d8242969751..b691b9d59503 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -15,14 +15,12 @@ * - Sysctl interface. * * The high level overview is that there is one input pool, into which - * various pieces of data are hashed. Some of that data is then "credited" as - * having a certain number of bits of entropy. When enough bits of entropy are - * available, the hash is finalized and handed as a key to a stream cipher that - * expands it indefinitely for various consumers. This key is periodically - * refreshed as the various entropy collectors, described below, add data to the - * input pool and credit it. There is currently no Fortuna-like scheduler - * involved, which can lead to malicious entropy sources causing a premature - * reseed, and the entropy estimates are, at best, conservative guesses. + * various pieces of data are hashed. Prior to initialization, some of that + * data is then "credited" as having a certain number of bits of entropy. + * When enough bits of entropy are available, the hash is finalized and + * handed as a key to a stream cipher that expands it indefinitely for + * various consumers. This key is periodically refreshed as the various + * entropy collectors, described below, add data to the input pool. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -53,6 +51,8 @@ #include <linux/completion.h> #include <linux/uuid.h> #include <linux/uaccess.h> +#include <linux/suspend.h> +#include <linux/siphash.h> #include <crypto/chacha.h> #include <crypto/blake2s.h> #include <asm/processor.h> @@ -71,27 +71,25 @@ *********************************************************************/ /* - * crng_init = 0 --> Uninitialized - * 1 --> Initialized - * 2 --> Initialized from input_pool - * * crng_init is protected by base_crng->lock, and only increases - * its value (from 0->1->2). + * its value (from empty->early->ready). */ -static int crng_init = 0; -#define crng_ready() (likely(crng_init > 1)) -/* Various types of waiters for crng_init->2 transition. */ +static enum { + CRNG_EMPTY = 0, /* Little to no entropy collected */ + CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ + CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ +} crng_init __read_mostly = CRNG_EMPTY; +static DEFINE_STATIC_KEY_FALSE(crng_is_ready); +#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY) +/* Various types of waiters for crng_init->CRNG_READY transition. 
*/ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; -static DEFINE_SPINLOCK(random_ready_chain_lock); -static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. */ -static struct ratelimit_state unseeded_warning = - RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); static struct ratelimit_state urandom_warning = RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); -static int ratelimit_disable __read_mostly; +static int ratelimit_disable __read_mostly = + IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); @@ -110,6 +108,11 @@ bool rng_is_initialized(void) } EXPORT_SYMBOL(rng_is_initialized); +static void __cold crng_set_ready(struct work_struct *work) +{ + static_branch_enable(&crng_is_ready); +} + /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ static void try_to_generate_entropy(void); @@ -137,73 +140,10 @@ int wait_for_random_bytes(void) } EXPORT_SYMBOL(wait_for_random_bytes); -/* - * Add a callback function that will be invoked when the input - * pool is initialised. - * - * returns: 0 if callback is successfully added - * -EALREADY if pool is already initialised (callback not called) - */ -int register_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret = -EALREADY; - - if (crng_ready()) - return ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - if (!crng_ready()) - ret = raw_notifier_chain_register(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -/* - * Delete a previously registered readiness callback function. 
- */ -int unregister_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - ret = raw_notifier_chain_unregister(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -static void process_random_ready_list(void) -{ - unsigned long flags; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - raw_notifier_call_chain(&random_ready_chain, 0, NULL); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); -} - -#define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) - -static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) -{ -#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM - const bool print_once = false; -#else - static bool print_once __read_mostly; -#endif - - if (print_once || crng_ready() || - (previous && (caller == READ_ONCE(*previous)))) - return; - WRITE_ONCE(*previous, caller); -#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM - print_once = true; -#endif - if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); -} +#define warn_unseeded_randomness() \ + if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ + __func__, (void *)_RET_IP_, crng_init) /********************************************************************* @@ -216,7 +156,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * * There are a few exported interfaces for use by other drivers: * - * void get_random_bytes(void *buf, size_t nbytes) + * void get_random_bytes(void *buf, size_t len) * u32 get_random_u32() * u64 get_random_u64() * unsigned int get_random_int() @@ -232,8 +172,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void *********************************************************************/ enum { - CRNG_RESEED_INTERVAL = 300 * HZ, - CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE + CRNG_RESEED_START_INTERVAL = HZ, + CRNG_RESEED_INTERVAL = 60 * HZ }; static struct { @@ -256,24 +196,17 @@ static DEFINE_PER_CPU(struct crng, crngs) = { .lock = INIT_LOCAL_LOCK(crngs.lock), }; -/* Used by crng_reseed() to extract a new seed from the input pool. */ -static bool drain_entropy(void *buf, size_t nbytes, bool force); +/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ +static void extract_entropy(void *buf, size_t len); -/* - * This extracts a new crng key from the input pool, but only if there is a - * sufficient amount of entropy available or force is true, in order to - * mitigate bruteforcing of newly added bits. - */ -static void crng_reseed(bool force) +/* This extracts a new crng key from the input pool. */ +static void crng_reseed(void) { unsigned long flags; unsigned long next_gen; u8 key[CHACHA_KEY_SIZE]; - bool finalize_init = false; - /* Only reseed if we can, to prevent brute forcing a small amount of new bits. 
*/ - if (!drain_entropy(key, sizeof(key), force)) - return; + extract_entropy(key, sizeof(key)); /* * We copy the new key into the base_crng, overwriting the old one, @@ -288,28 +221,10 @@ static void crng_reseed(bool force) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); WRITE_ONCE(base_crng.birth, jiffies); - if (!crng_ready()) { - crng_init = 2; - finalize_init = true; - } + if (!static_branch_likely(&crng_is_ready)) + crng_init = CRNG_READY; spin_unlock_irqrestore(&base_crng.lock, flags); memzero_explicit(key, sizeof(key)); - if (finalize_init) { - process_random_ready_list(); - wake_up_interruptible(&crng_init_wait); - kill_fasync(&fasync, SIGIO, POLL_IN); - pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } - } } /* @@ -318,6 +233,13 @@ static void crng_reseed(bool force) * the resultant ChaCha state to the user, along with the second * half of the block containing 32 bytes of random data that may * be used; random_data_len may not be greater than 32. + * + * The returned ChaCha state contains within it a copy of the old + * key value, at index 4, so the state should always be zeroed out + * immediately after using in order to maintain forward secrecy. + * If the state cannot be erased in a timely manner, then it is + * safer to set the random_data parameter to &chacha_state[4] so + * that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], u32 chacha_state[CHACHA_STATE_WORDS], @@ -338,10 +260,10 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], } /* - * Return whether the crng seed is considered to be sufficiently - * old that a reseeding might be attempted. This happens if the last - * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at - * an interval proportional to the uptime. + * Return whether the crng seed is considered to be sufficiently old + * that a reseeding is needed. This happens if the last reseeding + * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval + * proportional to the uptime. */ static bool crng_has_old_seed(void) { @@ -353,10 +275,10 @@ static bool crng_has_old_seed(void) if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) WRITE_ONCE(early_boot, false); else - interval = max_t(unsigned int, 5 * HZ, + interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, (unsigned int)uptime / 2 * HZ); } - return time_after(jiffies, READ_ONCE(base_crng.birth) + interval); + return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval); } /* @@ -375,28 +297,31 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], /* * For the fast path, we check whether we're ready, unlocked first, and * then re-check once locked later. In the case where we're really not - * ready, we do fast key erasure with the base_crng directly, because - * this is what crng_pre_init_inject() mutates during early init. + * ready, we do fast key erasure with the base_crng directly, extracting + * when crng_init is CRNG_EMPTY. 
*/ if (!crng_ready()) { bool ready; spin_lock_irqsave(&base_crng.lock, flags); ready = crng_ready(); - if (!ready) + if (!ready) { + if (crng_init == CRNG_EMPTY) + extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_fast_key_erasure(base_crng.key, chacha_state, random_data, random_data_len); + } spin_unlock_irqrestore(&base_crng.lock, flags); if (!ready) return; } /* - * If the base_crng is old enough, we try to reseed, which in turn - * bumps the generation counter that we check below. + * If the base_crng is old enough, we reseed, which in turn bumps the + * generation counter that we check below. */ if (unlikely(crng_has_old_seed())) - crng_reseed(false); + crng_reseed(); local_lock_irqsave(&crngs.lock, flags); crng = raw_cpu_ptr(&crngs); @@ -426,76 +351,24 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], local_unlock_irqrestore(&crngs.lock, flags); } -/* - * This function is for crng_init == 0 only. It loads entropy directly - * into the crng's key, without going through the input pool. It is, - * generally speaking, not very safe, but we use this only at early - * boot time when it's better to have something there rather than - * nothing. - * - * If account is set, then the crng_init_cnt counter is incremented. - * This shouldn't be set by functions like add_device_randomness(), - * where we can't trust the buffer passed to it is guaranteed to be - * unpredictable (so it might not have any entropy at all). - * - * Returns the number of bytes processed from input, which is bounded - * by CRNG_INIT_CNT_THRESH if account is true. - */ -static size_t crng_pre_init_inject(const void *input, size_t len, bool account) -{ - static int crng_init_cnt = 0; - struct blake2s_state hash; - unsigned long flags; - - blake2s_init(&hash, sizeof(base_crng.key)); - - spin_lock_irqsave(&base_crng.lock, flags); - if (crng_init != 0) { - spin_unlock_irqrestore(&base_crng.lock, flags); - return 0; - } - - if (account) - len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); - - blake2s_update(&hash, base_crng.key, sizeof(base_crng.key)); - blake2s_update(&hash, input, len); - blake2s_final(&hash, base_crng.key); - - if (account) { - crng_init_cnt += len; - if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { - ++base_crng.generation; - crng_init = 1; - } - } - - spin_unlock_irqrestore(&base_crng.lock, flags); - - if (crng_init == 1) - pr_notice("fast init done\n"); - - return len; -} - -static void _get_random_bytes(void *buf, size_t nbytes) +static void _get_random_bytes(void *buf, size_t len) { u32 chacha_state[CHACHA_STATE_WORDS]; u8 tmp[CHACHA_BLOCK_SIZE]; - size_t len; + size_t first_block_len; - if (!nbytes) + if (!len) return; - len = min_t(size_t, 32, nbytes); - crng_make_state(chacha_state, buf, len); - nbytes -= len; - buf += len; + first_block_len = min_t(size_t, 32, len); + crng_make_state(chacha_state, buf, first_block_len); + len -= first_block_len; + buf += first_block_len; - while (nbytes) { - if (nbytes < CHACHA_BLOCK_SIZE) { + while (len) { + if (len < CHACHA_BLOCK_SIZE) { chacha20_block(chacha_state, tmp); - memcpy(buf, tmp, nbytes); + memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } @@ -503,7 +376,7 @@ static void _get_random_bytes(void *buf, size_t nbytes) chacha20_block(chacha_state, buf); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - nbytes -= CHACHA_BLOCK_SIZE; + len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } @@ -513,67 +386,64 @@ static void _get_random_bytes(void *buf, size_t nbytes) /* * This function is the 
exported kernel interface. It returns some * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not rely on the hardware random - * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * TCP sequence numbers, etc. In order to ensure that the randomness + * by this function is okay, the function wait_for_random_bytes() + * should be called and return 0 at least once at any point prior. */ -void get_random_bytes(void *buf, size_t nbytes) +void get_random_bytes(void *buf, size_t len) { - static void *previous; - - warn_unseeded_randomness(&previous); - _get_random_bytes(buf, nbytes); + warn_unseeded_randomness(); + _get_random_bytes(buf, len); } EXPORT_SYMBOL(get_random_bytes); -static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) +static ssize_t get_random_bytes_user(struct iov_iter *iter) { - bool large_request = nbytes > 256; - ssize_t ret = 0; - size_t len; u32 chacha_state[CHACHA_STATE_WORDS]; - u8 output[CHACHA_BLOCK_SIZE]; + u8 block[CHACHA_BLOCK_SIZE]; + size_t ret = 0, copied; - if (!nbytes) + if (unlikely(!iov_iter_count(iter))) return 0; - len = min_t(size_t, 32, nbytes); - crng_make_state(chacha_state, output, len); - - if (copy_to_user(buf, output, len)) - return -EFAULT; - nbytes -= len; - buf += len; - ret += len; - - while (nbytes) { - if (large_request && need_resched()) { - if (signal_pending(current)) - break; - schedule(); - } + /* + * Immediately overwrite the ChaCha key at index 4 with random + * bytes, in case userspace causes copy_to_user() below to sleep + * forever, so that we still retain forward secrecy in that case. + */ + crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + /* + * However, if we're doing a read of len <= 32, we don't need to + * use chacha_state after, so we can simply return those bytes to + * the user directly. + */ + if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { + ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + goto out_zero_chacha; + } - chacha20_block(chacha_state, output); + for (;;) { + chacha20_block(chacha_state, block); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); - if (copy_to_user(buf, output, len)) { - ret = -EFAULT; + copied = copy_to_iter(block, sizeof(block), iter); + ret += copied; + if (!iov_iter_count(iter) || copied != sizeof(block)) break; - } - nbytes -= len; - buf += len; - ret += len; + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); + } } + memzero_explicit(block, sizeof(block)); +out_zero_chacha: memzero_explicit(chacha_state, sizeof(chacha_state)); - memzero_explicit(output, sizeof(output)); - return ret; + return ret ? ret : -EFAULT; } /* @@ -582,98 +452,69 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) * provided by this function is okay, the function wait_for_random_bytes() * should be called and return 0 at least once at any point prior. */ -struct batched_entropy { - union { - /* - * We make this 1.5x a ChaCha block, so that we get the - * remaining 32 bytes from fast key erasure, plus one full - * block from the detached ChaCha state. 
We can increase - * the size of this later if needed so long as we keep the - * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. - */ - u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))]; - u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))]; - }; - local_lock_t lock; - unsigned long generation; - unsigned int position; -}; - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { - .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock), - .position = UINT_MAX -}; - -u64 get_random_u64(void) -{ - u64 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - unsigned long next_gen; - - warn_unseeded_randomness(&previous); - - local_lock_irqsave(&batched_entropy_u64.lock, flags); - batch = raw_cpu_ptr(&batched_entropy_u64); - - next_gen = READ_ONCE(base_crng.generation); - if (batch->position >= ARRAY_SIZE(batch->entropy_u64) || - next_gen != batch->generation) { - _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64)); - batch->position = 0; - batch->generation = next_gen; - } - - ret = batch->entropy_u64[batch->position]; - batch->entropy_u64[batch->position] = 0; - ++batch->position; - local_unlock_irqrestore(&batched_entropy_u64.lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u64); - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { - .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock), - .position = UINT_MAX -}; - -u32 get_random_u32(void) -{ - u32 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - unsigned long next_gen; - - warn_unseeded_randomness(&previous); - - local_lock_irqsave(&batched_entropy_u32.lock, flags); - batch = raw_cpu_ptr(&batched_entropy_u32); - - next_gen = READ_ONCE(base_crng.generation); - if (batch->position >= ARRAY_SIZE(batch->entropy_u32) || - next_gen != batch->generation) { - _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32)); - batch->position = 0; - batch->generation = next_gen; - } - - ret = batch->entropy_u32[batch->position]; - batch->entropy_u32[batch->position] = 0; - ++batch->position; - local_unlock_irqrestore(&batched_entropy_u32.lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u32); +#define DEFINE_BATCHED_ENTROPY(type) \ +struct batch_ ##type { \ + /* \ + * We make this 1.5x a ChaCha block, so that we get the \ + * remaining 32 bytes from fast key erasure, plus one full \ + * block from the detached ChaCha state. We can increase \ + * the size of this later if needed so long as we keep the \ + * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. 
\ + */ \ + type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \ + local_lock_t lock; \ + unsigned long generation; \ + unsigned int position; \ +}; \ + \ +static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \ + .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \ + .position = UINT_MAX \ +}; \ + \ +type get_random_ ##type(void) \ +{ \ + type ret; \ + unsigned long flags; \ + struct batch_ ##type *batch; \ + unsigned long next_gen; \ + \ + warn_unseeded_randomness(); \ + \ + if (!crng_ready()) { \ + _get_random_bytes(&ret, sizeof(ret)); \ + return ret; \ + } \ + \ + local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \ + batch = raw_cpu_ptr(&batched_entropy_##type); \ + \ + next_gen = READ_ONCE(base_crng.generation); \ + if (batch->position >= ARRAY_SIZE(batch->entropy) || \ + next_gen != batch->generation) { \ + _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ + batch->position = 0; \ + batch->generation = next_gen; \ + } \ + \ + ret = batch->entropy[batch->position]; \ + batch->entropy[batch->position] = 0; \ + ++batch->position; \ + local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \ + return ret; \ +} \ +EXPORT_SYMBOL(get_random_ ##type); + +DEFINE_BATCHED_ENTROPY(u64) +DEFINE_BATCHED_ENTROPY(u32) #ifdef CONFIG_SMP /* * This function is called when the CPU is coming up, with entry * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. */ -int random_prepare_cpu(unsigned int cpu) +int __cold random_prepare_cpu(unsigned int cpu) { /* * When the cpu comes back online, immediately invalidate both @@ -687,65 +528,6 @@ int random_prepare_cpu(unsigned int cpu) } #endif -/** - * randomize_page - Generate a random, page aligned address - * @start: The smallest acceptable address the caller will take. - * @range: The size of the area, starting at @start, within which the - * random address must fall. - * - * If @start + @range would overflow, @range is capped. - * - * NOTE: Historical use of randomize_range, which this replaces, presumed that - * @start was already page aligned. We now align it regardless. - * - * Return: A page aligned address within [start, start + range). On error, - * @start is returned. - */ -unsigned long randomize_page(unsigned long start, unsigned long range) -{ - if (!PAGE_ALIGNED(start)) { - range -= PAGE_ALIGN(start) - start; - start = PAGE_ALIGN(start); - } - - if (start > ULONG_MAX - range) - range = ULONG_MAX - start; - - range >>= PAGE_SHIFT; - - if (range == 0) - return start; - - return start + (get_random_long() % range << PAGE_SHIFT); -} - -/* - * This function will use the architecture-specific hardware random - * number generator if it is available. It is not recommended for - * use. Use get_random_bytes() instead. It returns the number of - * bytes filled in. 
- */ -size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes) -{ - size_t left = nbytes; - u8 *p = buf; - - while (left) { - unsigned long v; - size_t chunk = min_t(size_t, left, sizeof(unsigned long)); - - if (!arch_get_random_long(&v)) - break; - - memcpy(p, &v, chunk); - p += chunk; - left -= chunk; - } - - return nbytes - left; -} -EXPORT_SYMBOL(get_random_bytes_arch); - /********************************************************************** * @@ -753,33 +535,28 @@ EXPORT_SYMBOL(get_random_bytes_arch); * * Callers may add entropy via: * - * static void mix_pool_bytes(const void *in, size_t nbytes) + * static void mix_pool_bytes(const void *buf, size_t len) * * After which, if added entropy should be credited: * - * static void credit_entropy_bits(size_t nbits) + * static void credit_init_bits(size_t bits) * - * Finally, extract entropy via these two, with the latter one - * setting the entropy count to zero and extracting only if there - * is POOL_MIN_BITS entropy credited prior or force is true: + * Finally, extract entropy via: * - * static void extract_entropy(void *buf, size_t nbytes) - * static bool drain_entropy(void *buf, size_t nbytes, bool force) + * static void extract_entropy(void *buf, size_t len) * **********************************************************************/ enum { POOL_BITS = BLAKE2S_HASH_SIZE * 8, - POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */ + POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */ + POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */ }; -/* For notifying userspace should write into /dev/random. */ -static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); - static struct { struct blake2s_state hash; spinlock_t lock; - unsigned int entropy_count; + unsigned int init_bits; } input_pool = { .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, @@ -788,48 +565,30 @@ static struct { .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), }; -static void _mix_pool_bytes(const void *in, size_t nbytes) +static void _mix_pool_bytes(const void *buf, size_t len) { - blake2s_update(&input_pool.hash, in, nbytes); + blake2s_update(&input_pool.hash, buf, len); } /* - * This function adds bytes into the entropy "pool". It does not - * update the entropy estimate. The caller should call - * credit_entropy_bits if this is appropriate. + * This function adds bytes into the input pool. It does not + * update the initialization bit counter; the caller should call + * credit_init_bits if this is appropriate. */ -static void mix_pool_bytes(const void *in, size_t nbytes) +static void mix_pool_bytes(const void *buf, size_t len) { unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(in, nbytes); + _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } -static void credit_entropy_bits(size_t nbits) -{ - unsigned int entropy_count, orig, add; - - if (!nbits) - return; - - add = min_t(size_t, nbits, POOL_BITS); - - do { - orig = READ_ONCE(input_pool.entropy_count); - entropy_count = min_t(unsigned int, POOL_BITS, orig + add); - } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); - - if (!crng_ready() && entropy_count >= POOL_MIN_BITS) - crng_reseed(false); -} - /* * This is an HKDF-like construction for using the hashed collected entropy * as a PRF key, that's then expanded block-by-block. 
*/ -static void extract_entropy(void *buf, size_t nbytes) +static void extract_entropy(void *buf, size_t len) { unsigned long flags; u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; @@ -858,12 +617,12 @@ static void extract_entropy(void *buf, size_t nbytes) spin_unlock_irqrestore(&input_pool.lock, flags); memzero_explicit(next_key, sizeof(next_key)); - while (nbytes) { - i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE); + while (len) { + i = min_t(size_t, len, BLAKE2S_HASH_SIZE); /* output = HASHPRF(seed, RDSEED || ++counter) */ ++block.counter; blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); - nbytes -= i; + len -= i; buf += i; } @@ -871,23 +630,42 @@ static void extract_entropy(void *buf, size_t nbytes) memzero_explicit(&block, sizeof(block)); } -/* - * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force - * is true, and then we set the entropy count to zero (but don't actually touch - * any data). Only then can we extract a new key with extract_entropy(). - */ -static bool drain_entropy(void *buf, size_t nbytes, bool force) +#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits) + +static void __cold _credit_init_bits(size_t bits) { - unsigned int entropy_count; + static struct execute_work set_ready; + unsigned int new, orig, add; + unsigned long flags; + + if (!bits) + return; + + add = min_t(size_t, bits, POOL_BITS); + do { - entropy_count = READ_ONCE(input_pool.entropy_count); - if (!force && entropy_count < POOL_MIN_BITS) - return false; - } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count); - extract_entropy(buf, nbytes); - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - return true; + orig = READ_ONCE(input_pool.init_bits); + new = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); + + if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { + crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ + execute_in_process_context(crng_set_ready, &set_ready); + wake_up_interruptible(&crng_init_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); + pr_notice("crng init done\n"); + if (urandom_warning.missed) + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", + urandom_warning.missed); + } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { + spin_lock_irqsave(&base_crng.lock, flags); + /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). 
*/ + if (crng_init == CRNG_EMPTY) { + extract_entropy(base_crng.key, sizeof(base_crng.key)); + crng_init = CRNG_EARLY; + } + spin_unlock_irqrestore(&base_crng.lock, flags); + } } @@ -898,15 +676,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force) * The following exported functions are used for pushing entropy into * the above entropy accumulation routines: * - * void add_device_randomness(const void *buf, size_t size); - * void add_input_randomness(unsigned int type, unsigned int code, - * unsigned int value); - * void add_disk_randomness(struct gendisk *disk); - * void add_hwgenerator_randomness(const void *buffer, size_t count, - * size_t entropy); - * void add_bootloader_randomness(const void *buf, size_t size); - * void add_vmfork_randomness(const void *unique_vm_id, size_t size); + * void add_device_randomness(const void *buf, size_t len); + * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); + * void add_bootloader_randomness(const void *buf, size_t len); + * void add_vmfork_randomness(const void *unique_vm_id, size_t len); * void add_interrupt_randomness(int irq); + * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); + * void add_disk_randomness(struct gendisk *disk); * * add_device_randomness() adds data to the input pool that * is likely to differ between two devices (or possibly even per boot). @@ -916,26 +692,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force) * that might otherwise be identical and have very little entropy * available to them (particularly common in the embedded world). * - * add_input_randomness() uses the input layer interrupt timing, as well - * as the event type information from the hardware. - * - * add_disk_randomness() uses what amounts to the seek time of block - * layer request events, on a per-disk_devt basis, as input to the - * entropy pool. Note that high-speed solid state drives with very low - * seek times do not make for good sources of entropy, as their seek - * times are usually fairly consistent. - * - * The above two routines try to estimate how many bits of entropy - * to credit. They do this by keeping track of the first and second - * order deltas of the event timings. - * * add_hwgenerator_randomness() is for true hardware RNGs, and will credit * entropy as specified by the caller. If the entropy pool is full it will * block until more entropy is needed. * - * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or - * add_device_randomness(), depending on whether or not the configuration - * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. + * add_bootloader_randomness() is called by bootloader drivers, such as EFI + * and device tree, and credits its input depending on whether or not the + * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set. * * add_vmfork_randomness() adds a unique (but not necessarily secret) ID * representing the current instance of a VM to the pool, without crediting, @@ -946,8 +709,22 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force) * as inputs, it feeds the input pool roughly once a second or after 64 * interrupts, crediting 1 bit of entropy for whichever comes first. * + * add_input_randomness() uses the input layer interrupt timing, as well + * as the event type information from the hardware. + * + * add_disk_randomness() uses what amounts to the seek time of block + * layer request events, on a per-disk_devt basis, as input to the + * entropy pool. 
Note that high-speed solid state drives with very low + * seek times do not make for good sources of entropy, as their seek + * times are usually fairly consistent. + * + * The last two routines try to estimate how many bits of entropy + * to credit. They do this by keeping track of the first and second + * order deltas of the event timings. + * **********************************************************************/ +static bool used_arch_random; static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) @@ -961,52 +738,91 @@ static int __init parse_trust_bootloader(char *arg) early_param("random.trust_cpu", parse_trust_cpu); early_param("random.trust_bootloader", parse_trust_bootloader); +static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data) +{ + unsigned long flags, entropy = random_get_entropy(); + + /* + * Encode a representation of how long the system has been suspended, + * in a way that is distinct from prior system suspends. + */ + ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() }; + + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&action, sizeof(action)); + _mix_pool_bytes(stamps, sizeof(stamps)); + _mix_pool_bytes(&entropy, sizeof(entropy)); + spin_unlock_irqrestore(&input_pool.lock, flags); + + if (crng_ready() && (action == PM_RESTORE_PREPARE || + (action == PM_POST_SUSPEND && + !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) { + crng_reseed(); + pr_notice("crng reseeded on system resumption\n"); + } + return 0; +} + +static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification }; + /* * The first collection of entropy occurs at system boot while interrupts - * are still turned off. Here we push in RDSEED, a timestamp, and utsname(). - * Depending on the above configuration knob, RDSEED may be considered - * sufficient for initialization. Note that much earlier setup may already - * have pushed entropy into the input pool by the time we get here. + * are still turned off. Here we push in latent entropy, RDSEED, a timestamp, + * utsname(), and the command line. Depending on the above configuration knob, + * RDSEED may be considered sufficient for initialization. Note that much + * earlier setup may already have pushed entropy into the input pool by the + * time we get here. 
*/ -int __init rand_initialize(void) +int __init random_init(const char *command_line) { - size_t i; ktime_t now = ktime_get_real(); - bool arch_init = true; - unsigned long rv; + unsigned int i, arch_bytes; + unsigned long entropy; #if defined(LATENT_ENTROPY_PLUGIN) static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif - for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { - if (!arch_get_random_seed_long_early(&rv) && - !arch_get_random_long_early(&rv)) { - rv = random_get_entropy(); - arch_init = false; + for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; + i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { + if (!arch_get_random_seed_long_early(&entropy) && + !arch_get_random_long_early(&entropy)) { + entropy = random_get_entropy(); + arch_bytes -= sizeof(entropy); } - _mix_pool_bytes(&rv, sizeof(rv)); + _mix_pool_bytes(&entropy, sizeof(entropy)); } _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(utsname(), sizeof(*(utsname()))); + _mix_pool_bytes(command_line, strlen(command_line)); + add_latent_entropy(); - extract_entropy(base_crng.key, sizeof(base_crng.key)); - ++base_crng.generation; + if (crng_ready()) + crng_reseed(); + else if (trust_cpu) + credit_init_bits(arch_bytes * 8); + used_arch_random = arch_bytes * 8 >= POOL_READY_BITS; - if (arch_init && trust_cpu && !crng_ready()) { - crng_init = 2; - pr_notice("crng init done (trusting CPU's manufacturer)\n"); - } + WARN_ON(register_pm_notifier(&pm_notifier)); - if (ratelimit_disable) { - urandom_warning.interval = 0; - unseeded_warning.interval = 0; - } + WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " + "entropy collection will consequently suffer."); return 0; } /* + * Returns whether arch randomness has been mixed into the initial + * state of the RNG, regardless of whether or not that randomness + * was credited. Knowing this is only good for a very limited set + * of uses, such as early init printk pointer obfuscation. + */ +bool rng_has_arch_random(void) +{ + return used_arch_random; +} + +/* * Add device- or boot-specific data to the input pool to help * initialize it. * @@ -1014,168 +830,46 @@ int __init rand_initialize(void) * the entropy pool having similar initial state across largely * identical devices. */ -void add_device_randomness(const void *buf, size_t size) +void add_device_randomness(const void *buf, size_t len) { - cycles_t cycles = random_get_entropy(); - unsigned long flags, now = jiffies; - - if (crng_init == 0 && size) - crng_pre_init_inject(buf, size, false); + unsigned long entropy = random_get_entropy(); + unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&cycles, sizeof(cycles)); - _mix_pool_bytes(&now, sizeof(now)); - _mix_pool_bytes(buf, size); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); -/* There is one of these per entropy source */ -struct timer_rand_state { - unsigned long last_time; - long last_delta, last_delta2; -}; - -/* - * This function adds entropy to the entropy "pool" by using timing - * delays. It uses the timer_rand_state structure to make an estimate - * of how many bits of entropy this call has added to the pool. - * - * The number "num" is also added to the pool - it should somehow describe - * the type of event which just happened. 
This is currently 0-255 for - * keyboard scan codes, and 256 upwards for interrupts. - */ -static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) -{ - cycles_t cycles = random_get_entropy(); - unsigned long flags, now = jiffies; - long delta, delta2, delta3; - - spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&cycles, sizeof(cycles)); - _mix_pool_bytes(&now, sizeof(now)); - _mix_pool_bytes(&num, sizeof(num)); - spin_unlock_irqrestore(&input_pool.lock, flags); - - /* - * Calculate number of bits of randomness we probably added. - * We take into account the first, second and third-order deltas - * in order to make our estimate. - */ - delta = now - READ_ONCE(state->last_time); - WRITE_ONCE(state->last_time, now); - - delta2 = delta - READ_ONCE(state->last_delta); - WRITE_ONCE(state->last_delta, delta); - - delta3 = delta2 - READ_ONCE(state->last_delta2); - WRITE_ONCE(state->last_delta2, delta2); - - if (delta < 0) - delta = -delta; - if (delta2 < 0) - delta2 = -delta2; - if (delta3 < 0) - delta3 = -delta3; - if (delta > delta2) - delta = delta2; - if (delta > delta3) - delta = delta3; - - /* - * delta is now minimum absolute delta. - * Round down by 1 bit on general principles, - * and limit entropy estimate to 12 bits. - */ - credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11)); -} - -void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) -{ - static unsigned char last_value; - static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; - - /* Ignore autorepeat and the like. */ - if (value == last_value) - return; - - last_value = value; - add_timer_randomness(&input_timer_state, - (type << 4) ^ code ^ (code >> 4) ^ value); -} -EXPORT_SYMBOL_GPL(add_input_randomness); - -#ifdef CONFIG_BLOCK -void add_disk_randomness(struct gendisk *disk) -{ - if (!disk || !disk->random) - return; - /* First major is 1, so we get >= 0x200 here. */ - add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); -} -EXPORT_SYMBOL_GPL(add_disk_randomness); - -void rand_initialize_disk(struct gendisk *disk) -{ - struct timer_rand_state *state; - - /* - * If kzalloc returns null, we just won't use that entropy - * source. - */ - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) { - state->last_time = INITIAL_JIFFIES; - disk->random = state; - } -} -#endif - /* * Interface for in-kernel drivers of true hardware RNGs. * Those devices may produce endless random bits and will be throttled * when our pool is full. */ -void add_hwgenerator_randomness(const void *buffer, size_t count, - size_t entropy) +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy) { - if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) { - size_t ret = crng_pre_init_inject(buffer, count, true); - mix_pool_bytes(buffer, ret); - count -= ret; - buffer += ret; - if (!count || crng_init == 0) - return; - } + mix_pool_bytes(buf, len); + credit_init_bits(entropy); /* - * Throttle writing if we're above the trickle threshold. - * We'll be woken up again once below POOL_MIN_BITS, when - * the calling thread is about to terminate, or once - * CRNG_RESEED_INTERVAL has elapsed. + * Throttle writing to once every CRNG_RESEED_INTERVAL, unless + * we're not yet initialized. 
*/ - wait_event_interruptible_timeout(random_write_wait, - !system_wq || kthread_should_stop() || - input_pool.entropy_count < POOL_MIN_BITS, - CRNG_RESEED_INTERVAL); - mix_pool_bytes(buffer, count); - credit_entropy_bits(entropy); + if (!kthread_should_stop() && crng_ready()) + schedule_timeout_interruptible(CRNG_RESEED_INTERVAL); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); /* - * Handle random seed passed by bootloader. - * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise - * it would be regarded as device data. - * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. + * Handle random seed passed by bootloader, and credit it if + * CONFIG_RANDOM_TRUST_BOOTLOADER is set. */ -void add_bootloader_randomness(const void *buf, size_t size) +void __cold add_bootloader_randomness(const void *buf, size_t len) { + mix_pool_bytes(buf, len); if (trust_bootloader) - add_hwgenerator_randomness(buf, size, size * 8); - else - add_device_randomness(buf, size); + credit_init_bits(len * 8); } EXPORT_SYMBOL_GPL(add_bootloader_randomness); @@ -1187,11 +881,11 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain); * don't credit it, but we do immediately force a reseed after so * that it's used by the crng posthaste. */ -void add_vmfork_randomness(const void *unique_vm_id, size_t size) +void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len) { - add_device_randomness(unique_vm_id, size); + add_device_randomness(unique_vm_id, len); if (crng_ready()) { - crng_reseed(true); + crng_reseed(); pr_notice("crng reseeded due to virtual machine fork\n"); } blocking_notifier_call_chain(&vmfork_chain, 0, NULL); @@ -1200,13 +894,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) EXPORT_SYMBOL_GPL(add_vmfork_randomness); #endif -int register_random_vmfork_notifier(struct notifier_block *nb) +int __cold register_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmfork_chain, nb); } EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); -int unregister_random_vmfork_notifier(struct notifier_block *nb) +int __cold unregister_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmfork_chain, nb); } @@ -1218,17 +912,15 @@ struct fast_pool { unsigned long pool[4]; unsigned long last; unsigned int count; - u16 reg_idx; }; static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { #ifdef CONFIG_64BIT - /* SipHash constants */ - .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL, - 0x6c7967656e657261UL, 0x7465646279746573UL } +#define FASTMIX_PERM SIPHASH_PERMUTATION + .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 } #else - /* HalfSipHash constants */ - .pool = { 0, 0, 0x6c796765U, 0x74656462U } +#define FASTMIX_PERM HSIPHASH_PERMUTATION + .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 } #endif }; @@ -1236,27 +928,16 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { * This is [Half]SipHash-1-x, starting from an empty key. Because * the key is fixed, it assumes that its inputs are non-malicious, * and therefore this has no security on its own. s represents the - * 128 or 256-bit SipHash state, while v represents a 128-bit input. + * four-word SipHash state, while v represents a two-word input. 
 */
-static void fast_mix(unsigned long s[4], const unsigned long *v)
+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
 {
-	size_t i;
-
-	for (i = 0; i < 16 / sizeof(long); ++i) {
-		s[3] ^= v[i];
-#ifdef CONFIG_64BIT
-		s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
-		s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
-		s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
-		s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
-#else
-		s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
-		s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
-		s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
-		s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
-#endif
-		s[0] ^= v[i];
-	}
+	s[3] ^= v1;
+	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+	s[0] ^= v1;
+	s[3] ^= v2;
+	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+	s[0] ^= v2;
 }
 
 #ifdef CONFIG_SMP
 /*
@@ -1264,7 +945,7 @@ static void fast_mix(unsigned long s[4], const unsigned long *v)
  * This function is called when the CPU has just come online, with
  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
  */
-int random_online_cpu(unsigned int cpu)
+int __cold random_online_cpu(unsigned int cpu)
 {
 	/*
 	 * During CPU shutdown and before CPU onlining, add_interrupt_
@@ -1282,33 +963,18 @@ int random_online_cpu(unsigned int cpu)
 }
 #endif
 
-static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
-{
-	unsigned long *ptr = (unsigned long *)regs;
-	unsigned int idx;
-
-	if (regs == NULL)
-		return 0;
-	idx = READ_ONCE(f->reg_idx);
-	if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
-		idx = 0;
-	ptr += idx++;
-	WRITE_ONCE(f->reg_idx, idx);
-	return *ptr;
-}
-
 static void mix_interrupt_randomness(struct work_struct *work)
 {
 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
 	/*
-	 * The size of the copied stack pool is explicitly 16 bytes so that we
-	 * tax mix_pool_byte()'s compression function the same amount on all
-	 * platforms. This means on 64-bit we copy half the pool into this,
-	 * while on 32-bit we copy all of it. The entropy is supposed to be
-	 * sufficiently dispersed between bits that in the sponge-like
-	 * half case, on average we don't wind up "losing" some.
+	 * The size of the copied stack pool is explicitly 2 longs so that we
+	 * only ever ingest half of the siphash output each time, retaining
+	 * the other half as the next "key" that carries over. The entropy is
+	 * supposed to be sufficiently dispersed between bits so on average
+	 * we don't wind up "losing" some.
 	 */
-	u8 pool[16];
+	unsigned long pool[2];
+	unsigned int count;
 
 	/* Check to see if we're running on the wrong CPU due to hotplug. */
 	local_irq_disable();
@@ -1322,17 +988,13 @@ static void mix_interrupt_randomness(struct work_struct *work)
 	 * consistent view, before we reenable irqs again.
*/ memcpy(pool, fast_pool->pool, sizeof(pool)); + count = fast_pool->count; fast_pool->count = 0; fast_pool->last = jiffies; local_irq_enable(); - if (unlikely(crng_init == 0)) { - crng_pre_init_inject(pool, sizeof(pool), true); - mix_pool_bytes(pool, sizeof(pool)); - } else { - mix_pool_bytes(pool, sizeof(pool)); - credit_entropy_bits(1); - } + mix_pool_bytes(pool, sizeof(pool)); + credit_init_bits(max(1u, (count & U16_MAX) / 64)); memzero_explicit(pool, sizeof(pool)); } @@ -1340,42 +1002,19 @@ static void mix_interrupt_randomness(struct work_struct *work) void add_interrupt_randomness(int irq) { enum { MIX_INFLIGHT = 1U << 31 }; - cycles_t cycles = random_get_entropy(); - unsigned long now = jiffies; + unsigned long entropy = random_get_entropy(); struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned int new_count; - union { - u32 u32[4]; - u64 u64[2]; - unsigned long longs[16 / sizeof(long)]; - } irq_data; - - if (cycles == 0) - cycles = get_reg(fast_pool, regs); - - if (sizeof(cycles) == 8) - irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq; - else { - irq_data.u32[0] = cycles ^ irq; - irq_data.u32[1] = now; - } - if (sizeof(unsigned long) == 8) - irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; - else { - irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_; - irq_data.u32[3] = get_reg(fast_pool, regs); - } - - fast_mix(fast_pool->pool, irq_data.longs); + fast_mix(fast_pool->pool, entropy, + (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); new_count = ++fast_pool->count; if (new_count & MIX_INFLIGHT) return; - if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) || - unlikely(crng_init == 0))) + if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) return; if (unlikely(!fast_pool->mix.func)) @@ -1385,6 +1024,132 @@ void add_interrupt_randomness(int irq) } EXPORT_SYMBOL_GPL(add_interrupt_randomness); +/* There is one of these per entropy source */ +struct timer_rand_state { + unsigned long last_time; + long last_delta, last_delta2; +}; + +/* + * This function adds entropy to the entropy "pool" by using timing + * delays. It uses the timer_rand_state structure to make an estimate + * of how many bits of entropy this call has added to the pool. The + * value "num" is also added to the pool; it should somehow describe + * the type of event that just happened. + */ +static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) +{ + unsigned long entropy = random_get_entropy(), now = jiffies, flags; + long delta, delta2, delta3; + unsigned int bits; + + /* + * If we're in a hard IRQ, add_interrupt_randomness() will be called + * sometime after, so mix into the fast pool. + */ + if (in_hardirq()) { + fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); + } else { + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(&num, sizeof(num)); + spin_unlock_irqrestore(&input_pool.lock, flags); + } + + if (crng_ready()) + return; + + /* + * Calculate number of bits of randomness we probably added. + * We take into account the first, second and third-order deltas + * in order to make our estimate. 
+ */ + delta = now - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, now); + + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); + + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); + + if (delta < 0) + delta = -delta; + if (delta2 < 0) + delta2 = -delta2; + if (delta3 < 0) + delta3 = -delta3; + if (delta > delta2) + delta = delta2; + if (delta > delta3) + delta = delta3; + + /* + * delta is now minimum absolute delta. Round down by 1 bit + * on general principles, and limit entropy estimate to 11 bits. + */ + bits = min(fls(delta >> 1), 11); + + /* + * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness() + * will run after this, which uses a different crediting scheme of 1 bit + * per every 64 interrupts. In order to let that function do accounting + * close to the one in this function, we credit a full 64/64 bit per bit, + * and then subtract one to account for the extra one added. + */ + if (in_hardirq()) + this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; + else + _credit_init_bits(bits); +} + +void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) +{ + static unsigned char last_value; + static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; + + /* Ignore autorepeat and the like. */ + if (value == last_value) + return; + + last_value = value; + add_timer_randomness(&input_timer_state, + (type << 4) ^ code ^ (code >> 4) ^ value); +} +EXPORT_SYMBOL_GPL(add_input_randomness); + +#ifdef CONFIG_BLOCK +void add_disk_randomness(struct gendisk *disk) +{ + if (!disk || !disk->random) + return; + /* First major is 1, so we get >= 0x200 here. */ + add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); +} +EXPORT_SYMBOL_GPL(add_disk_randomness); + +void __cold rand_initialize_disk(struct gendisk *disk) +{ + struct timer_rand_state *state; + + /* + * If kzalloc returns null, we just won't use that entropy + * source. + */ + state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); + if (state) { + state->last_time = INITIAL_JIFFIES; + disk->random = state; + } +} +#endif + +struct entropy_timer_state { + unsigned long entropy; + struct timer_list timer; + unsigned int samples, samples_per_bit; +}; + /* * Each time the timer fires, we expect that we got an unpredictable * jump in the cycle counter. Even if the timer is running on another @@ -1398,40 +1163,50 @@ EXPORT_SYMBOL_GPL(add_interrupt_randomness); * * So the re-arming always happens in the entropy loop itself. */ -static void entropy_timer(struct timer_list *t) +static void __cold entropy_timer(struct timer_list *timer) { - credit_entropy_bits(1); + struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); + + if (++state->samples == state->samples_per_bit) { + credit_init_bits(1); + state->samples = 0; + } } /* * If we have an actual cycle counter, see if we can * generate enough entropy with timing noise */ -static void try_to_generate_entropy(void) +static void __cold try_to_generate_entropy(void) { - struct { - cycles_t cycles; - struct timer_list timer; - } stack; - - stack.cycles = random_get_entropy(); - - /* Slow counter - or none. 
Don't even bother */ - if (stack.cycles == random_get_entropy()) + enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 }; + struct entropy_timer_state stack; + unsigned int i, num_different = 0; + unsigned long last = random_get_entropy(); + + for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { + stack.entropy = random_get_entropy(); + if (stack.entropy != last) + ++num_different; + last = stack.entropy; + } + stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); + if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT) return; + stack.samples = 0; timer_setup_on_stack(&stack.timer, entropy_timer, 0); while (!crng_ready() && !signal_pending(current)) { if (!timer_pending(&stack.timer)) mod_timer(&stack.timer, jiffies + 1); - mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); schedule(); - stack.cycles = random_get_entropy(); + stack.entropy = random_get_entropy(); } del_timer_sync(&stack.timer); destroy_timer_on_stack(&stack.timer); - mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); } @@ -1463,9 +1238,12 @@ static void try_to_generate_entropy(void) * **********************************************************************/ -SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, - flags) +SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { + struct iov_iter iter; + struct iovec iov; + int ret; + if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; @@ -1476,106 +1254,102 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) return -EINVAL; - if (count > INT_MAX) - count = INT_MAX; - - if (!(flags & GRND_INSECURE) && !crng_ready()) { - int ret; - + if (!crng_ready() && !(flags & GRND_INSECURE)) { if (flags & GRND_NONBLOCK) return -EAGAIN; ret = wait_for_random_bytes(); if (unlikely(ret)) return ret; } - return get_random_bytes_user(buf, count); + + ret = import_single_range(READ, ubuf, len, &iov, &iter); + if (unlikely(ret)) + return ret; + return get_random_bytes_user(&iter); } static __poll_t random_poll(struct file *file, poll_table *wait) { - __poll_t mask; - poll_wait(file, &crng_init_wait, wait); - poll_wait(file, &random_write_wait, wait); - mask = 0; - if (crng_ready()) - mask |= EPOLLIN | EPOLLRDNORM; - if (input_pool.entropy_count < POOL_MIN_BITS) - mask |= EPOLLOUT | EPOLLWRNORM; - return mask; + return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } -static int write_pool(const char __user *ubuf, size_t count) +static ssize_t write_pool_user(struct iov_iter *iter) { - size_t len; - int ret = 0; u8 block[BLAKE2S_BLOCK_SIZE]; + ssize_t ret = 0; + size_t copied; + + if (unlikely(!iov_iter_count(iter))) + return 0; - while (count) { - len = min(count, sizeof(block)); - if (copy_from_user(block, ubuf, len)) { - ret = -EFAULT; - goto out; + for (;;) { + copied = copy_from_iter(block, sizeof(block), iter); + ret += copied; + mix_pool_bytes(block, copied); + if (!iov_iter_count(iter) || copied != sizeof(block)) + break; + + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); } - count -= len; - ubuf += len; - mix_pool_bytes(block, len); - cond_resched(); } -out: memzero_explicit(block, sizeof(block)); - return ret; + return ret ? 
ret : -EFAULT; } -static ssize_t random_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) +static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) { - int ret; - - ret = write_pool(buffer, count); - if (ret) - return ret; - - return (ssize_t)count; + return write_pool_user(iter); } -static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) +static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { static int maxwarn = 10; - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); + /* + * Opportunistically attempt to initialize the RNG on platforms that + * have fast cycle counters, but don't (for now) require it to succeed. + */ + if (!crng_ready()) + try_to_generate_entropy(); + + if (!crng_ready()) { + if (!ratelimit_disable && maxwarn <= 0) + ++urandom_warning.missed; + else if (ratelimit_disable || __ratelimit(&urandom_warning)) { + --maxwarn; + pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", + current->comm, iov_iter_count(iter)); + } } - return get_random_bytes_user(buf, nbytes); + return get_random_bytes_user(iter); } -static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) +static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { int ret; ret = wait_for_random_bytes(); if (ret != 0) return ret; - return get_random_bytes_user(buf, nbytes); + return get_random_bytes_user(iter); } static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { - int size, ent_count; int __user *p = (int __user *)arg; - int retval; + int ent_count; switch (cmd) { case RNDGETENTCNT: /* Inherently racy, no point locking. */ - if (put_user(input_pool.entropy_count, p)) + if (put_user(input_pool.init_bits, p)) return -EFAULT; return 0; case RNDADDTOENTCNT: @@ -1585,41 +1359,46 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EFAULT; if (ent_count < 0) return -EINVAL; - credit_entropy_bits(ent_count); + credit_init_bits(ent_count); return 0; - case RNDADDENTROPY: + case RNDADDENTROPY: { + struct iov_iter iter; + struct iovec iov; + ssize_t ret; + int len; + if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ent_count, p++)) return -EFAULT; if (ent_count < 0) return -EINVAL; - if (get_user(size, p++)) + if (get_user(len, p++)) return -EFAULT; - retval = write_pool((const char __user *)p, size); - if (retval < 0) - return retval; - credit_entropy_bits(ent_count); + ret = import_single_range(WRITE, p, len, &iov, &iter); + if (unlikely(ret)) + return ret; + ret = write_pool_user(&iter); + if (unlikely(ret < 0)) + return ret; + /* Since we're crediting, enforce that it was all written into the pool. */ + if (unlikely(ret != len)) + return -EFAULT; + credit_init_bits(ent_count); return 0; + } case RNDZAPENTCNT: case RNDCLEARPOOL: - /* - * Clear the entropy pool counters. We no longer clear - * the entropy pool, as that's silly. - */ + /* No longer has any effect. 
*/ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - } return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!crng_ready()) return -ENODATA; - crng_reseed(false); + crng_reseed(); return 0; default: return -EINVAL; @@ -1632,22 +1411,26 @@ static int random_fasync(int fd, struct file *filp, int on) } const struct file_operations random_fops = { - .read = random_read, - .write = random_write, + .read_iter = random_read_iter, + .write_iter = random_write_iter, .poll = random_poll, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, }; const struct file_operations urandom_fops = { - .read = urandom_read, - .write = random_write, + .read_iter = urandom_read_iter, + .write_iter = random_write_iter, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, }; @@ -1671,7 +1454,7 @@ const struct file_operations urandom_fops = { * * - write_wakeup_threshold - the amount of entropy in the input pool * below which write polls to /dev/random will unblock, requesting - * more entropy, tied to the POOL_MIN_BITS constant. It is writable + * more entropy, tied to the POOL_READY_BITS constant. It is writable * to avoid breaking old userspaces, but writing to it does not * change any behavior of the RNG. * @@ -1686,7 +1469,7 @@ const struct file_operations urandom_fops = { #include <linux/sysctl.h> static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; -static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS; +static int sysctl_random_write_wakeup_bits = POOL_READY_BITS; static int sysctl_poolsize = POOL_BITS; static u8 sysctl_bootid[UUID_SIZE]; @@ -1695,7 +1478,7 @@ static u8 sysctl_bootid[UUID_SIZE]; * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. */ -static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, +static int proc_do_uuid(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { u8 tmp_uuid[UUID_SIZE], *uuid; @@ -1722,14 +1505,14 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, } snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); - return proc_dostring(&fake_table, 0, buffer, lenp, ppos); + return proc_dostring(&fake_table, 0, buf, lenp, ppos); } /* The same as proc_dointvec, but writes don't change anything. */ -static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer, +static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { - return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos); + return write ? 
0 : proc_dointvec(table, 0, buf, lenp, ppos);
 }
 
 static struct ctl_table random_table[] = {
@@ -1742,7 +1525,7 @@ static struct ctl_table random_table[] = {
 	},
 	{
 		.procname	= "entropy_avail",
-		.data		= &input_pool.entropy_count,
+		.data		= &input_pool.init_bits,
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
@@ -1776,8 +1559,8 @@ static struct ctl_table random_table[] = {
 };
 
 /*
- * rand_initialize() is called before sysctl_init(),
- * so we cannot call register_sysctl_init() in rand_initialize()
+ * random_init() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in random_init()
  */
 static int __init random_sysctls_init(void)
 {
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 4704fa553098..c1eb5d223839 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
 	if (!rc) {
 		out = (struct tpm2_get_cap_out *)
 			&buf.data[TPM_HEADER_SIZE];
-		*value = be32_to_cpu(out->value);
+		/*
+		 * To prevent some systems from failing to boot, Infineon TPM 2.0
+		 * returns SUCCESS for TPM2_Startup in field upgrade mode. Also,
+		 * the TPM2_GetCapability command returns a zero-length list
+		 * in field upgrade mode.
+		 */
+		if (be32_to_cpu(out->property_cnt) > 0)
+			*value = be32_to_cpu(out->value);
+		else
+			rc = -ENODATA;
 	}
 	tpm_buf_destroy(&buf);
 	return rc;
@@ -745,7 +754,11 @@ int tpm2_auto_startup(struct tpm_chip *chip)
 	rc = tpm2_get_cc_attrs_tbl(chip);
 
 out:
-	if (rc == TPM2_RC_UPGRADE) {
+	/*
+	 * Infineon TPM in field upgrade mode will return no data for the number
+	 * of supported commands.
+	 */
+	if (rc == TPM2_RC_UPGRADE || rc == -ENODATA) {
 		dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n");
 		chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
 		rc = 0;
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 6e3235565a4d..5c233423c56f 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -177,7 +177,7 @@ static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
 
 static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
 {
-	return 0;
+	return false;
 }
 
 static const struct tpm_class_ops ftpm_tee_tpm_ops = {
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3af4c07a9342..d3989b257f42 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
 				ibmvtpm->rtce_buf != NULL,
 				HZ)) {
+		rc = -ENODEV;
 		dev_err(dev, "CRQ response timed out\n");
 		goto init_irq_cleanup;
 	}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d3f2e5364c27..bcff6429e0b4 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -153,50 +153,46 @@ static int check_acpi_tpm2(struct device *dev)
 #endif
 
 static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
-			      u8 *result)
+			      u8 *result, enum tpm_tis_io_mode io_mode)
 {
 	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
-	while (len--)
-		*result++ = ioread8(phy->iobase + addr);
+	__le16 result_le16;
+	__le32 result_le32;
+
+	switch (io_mode) {
+	case TPM_TIS_PHYS_8:
+		while (len--)
+			*result++ = ioread8(phy->iobase + addr);
+		break;
+	case TPM_TIS_PHYS_16:
+		result_le16 = cpu_to_le16(ioread16(phy->iobase + addr));
+		memcpy(result, &result_le16, sizeof(u16));
+		break;
+	case TPM_TIS_PHYS_32:
+
result_le32 = cpu_to_le32(ioread32(phy->iobase + addr)); + memcpy(result, &result_le32, sizeof(u32)); + break; + } return 0; } static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, - const u8 *value) + const u8 *value, enum tpm_tis_io_mode io_mode) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - while (len--) - iowrite8(*value++, phy->iobase + addr); - - return 0; -} - -static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result) -{ - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - - *result = ioread16(phy->iobase + addr); - - return 0; -} - -static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result) -{ - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - - *result = ioread32(phy->iobase + addr); - - return 0; -} - -static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) -{ - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - - iowrite32(value, phy->iobase + addr); + switch (io_mode) { + case TPM_TIS_PHYS_8: + while (len--) + iowrite8(*value++, phy->iobase + addr); + break; + case TPM_TIS_PHYS_16: + return -EINVAL; + case TPM_TIS_PHYS_32: + iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr); + break; + } return 0; } @@ -204,9 +200,6 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) static const struct tpm_tis_phy_ops tpm_tcg = { .read_bytes = tpm_tcg_read_bytes, .write_bytes = tpm_tcg_write_bytes, - .read16 = tpm_tcg_read16, - .read32 = tpm_tcg_read32, - .write32 = tpm_tcg_write32, }; static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 3be24f221e32..6c203f36b8a1 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -104,54 +104,88 @@ struct tpm_tis_data { unsigned int timeout_max; /* usecs */ }; +/* + * IO modes to indicate how many bytes should be read/written at once in the + * tpm_tis_phy_ops read_bytes/write_bytes calls. Use TPM_TIS_PHYS_8 to + * receive/transmit byte-wise, TPM_TIS_PHYS_16 for two bytes etc. 
+ */ +enum tpm_tis_io_mode { + TPM_TIS_PHYS_8, + TPM_TIS_PHYS_16, + TPM_TIS_PHYS_32, +}; + struct tpm_tis_phy_ops { + /* data is passed in little endian */ int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *result); + u8 *result, enum tpm_tis_io_mode mode); int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, - const u8 *value); - int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); - int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); - int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); + const u8 *value, enum tpm_tis_io_mode mode); }; static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { - return data->phy_ops->read_bytes(data, addr, len, result); + return data->phy_ops->read_bytes(data, addr, len, result, + TPM_TIS_PHYS_8); } static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result) { - return data->phy_ops->read_bytes(data, addr, 1, result); + return data->phy_ops->read_bytes(data, addr, 1, result, TPM_TIS_PHYS_8); } static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr, u16 *result) { - return data->phy_ops->read16(data, addr, result); + __le16 result_le; + int rc; + + rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), + (u8 *)&result_le, TPM_TIS_PHYS_16); + if (!rc) + *result = le16_to_cpu(result_le); + + return rc; } static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, u32 *result) { - return data->phy_ops->read32(data, addr, result); + __le32 result_le; + int rc; + + rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), + (u8 *)&result_le, TPM_TIS_PHYS_32); + if (!rc) + *result = le32_to_cpu(result_le); + + return rc; } static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value) { - return data->phy_ops->write_bytes(data, addr, len, value); + return data->phy_ops->write_bytes(data, addr, len, value, + TPM_TIS_PHYS_8); } static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value) { - return data->phy_ops->write_bytes(data, addr, 1, &value); + return data->phy_ops->write_bytes(data, addr, 1, &value, + TPM_TIS_PHYS_8); } static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr, u32 value) { - return data->phy_ops->write32(data, addr, value); + __le32 value_le; + int rc; + + value_le = cpu_to_le32(value); + rc = data->phy_ops->write_bytes(data, addr, sizeof(u32), + (u8 *)&value_le, TPM_TIS_PHYS_32); + return rc; } static inline bool is_bsw(void) diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index f6c0affbb456..974479a1ec5a 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -31,6 +31,7 @@ #define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */ #define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */ #define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */ +#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */ #define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */ #define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */ #define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */ @@ -742,15 +743,15 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) } vendor = le32_to_cpup((__le32 *)buf); - if (vendor != TPM_CR50_I2C_DID_VID) { + if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) { dev_err(dev, 
"Vendor ID did not match! ID was %08x\n", vendor); tpm_cr50_release_locality(chip, true); return -ENODEV; } - dev_info(dev, "cr50 TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", + dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", + vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50", client->addr, client->irq, vendor >> 16); - return tpm_chip_register(chip); } @@ -768,8 +769,8 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client) struct device *dev = &client->dev; if (!chip) { - dev_err(dev, "Could not get client data at remove\n"); - return -ENODEV; + dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n"); + return 0; } tpm_chip_unregister(chip); diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h index bba73979c368..d0f66f6f1931 100644 --- a/drivers/char/tpm/tpm_tis_spi.h +++ b/drivers/char/tpm/tpm_tis_spi.h @@ -31,10 +31,6 @@ extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, u8 *in, const u8 *out); -extern int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result); -extern int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result); -extern int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value); - #ifdef CONFIG_TCG_TIS_SPI_CR50 extern int cr50_spi_probe(struct spi_device *spi); #else diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c index 7bf123d3c537..f4937280e940 100644 --- a/drivers/char/tpm/tpm_tis_spi_cr50.c +++ b/drivers/char/tpm/tpm_tis_spi_cr50.c @@ -222,13 +222,13 @@ static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 le } static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *result) + u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, const u8 *value) + u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value); } @@ -236,9 +236,6 @@ static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr, static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = { .read_bytes = tpm_tis_spi_cr50_read_bytes, .write_bytes = tpm_tis_spi_cr50_write_bytes, - .read16 = tpm_tis_spi_read16, - .read32 = tpm_tis_spi_read32, - .write32 = tpm_tis_spi_write32, }; static void cr50_print_fw_version(struct tpm_tis_data *data) diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index 184396b3af50..a0963a3e92bd 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -141,55 +141,17 @@ exit: } static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *result) + u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, const u8 *value) + u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_transfer(data, addr, len, NULL, value); } -int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) -{ - __le16 result_le; - int rc; - - rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), - (u8 *)&result_le); - if (!rc) - *result = le16_to_cpu(result_le); - - return rc; -} - -int tpm_tis_spi_read32(struct 
tpm_tis_data *data, u32 addr, u32 *result) -{ - __le32 result_le; - int rc; - - rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), - (u8 *)&result_le); - if (!rc) - *result = le32_to_cpu(result_le); - - return rc; -} - -int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value) -{ - __le32 value_le; - int rc; - - value_le = cpu_to_le32(value); - rc = data->phy_ops->write_bytes(data, addr, sizeof(u32), - (u8 *)&value_le); - - return rc; -} - int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, int irq, const struct tpm_tis_phy_ops *phy_ops) { @@ -205,9 +167,6 @@ int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { .read_bytes = tpm_tis_spi_read_bytes, .write_bytes = tpm_tis_spi_write_bytes, - .read16 = tpm_tis_spi_read16, - .read32 = tpm_tis_spi_read32, - .write32 = tpm_tis_spi_write32, }; static int tpm_tis_spi_probe(struct spi_device *dev) diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c index e47bdd272704..679196c61401 100644 --- a/drivers/char/tpm/tpm_tis_synquacer.c +++ b/drivers/char/tpm/tpm_tis_synquacer.c @@ -35,72 +35,53 @@ static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_da } static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *result) + u16 len, u8 *result, + enum tpm_tis_io_mode io_mode) { struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); - - while (len--) - *result++ = ioread8(phy->iobase + addr); + switch (io_mode) { + case TPM_TIS_PHYS_8: + while (len--) + *result++ = ioread8(phy->iobase + addr); + break; + case TPM_TIS_PHYS_16: + result[1] = ioread8(phy->iobase + addr + 1); + result[0] = ioread8(phy->iobase + addr); + break; + case TPM_TIS_PHYS_32: + result[3] = ioread8(phy->iobase + addr + 3); + result[2] = ioread8(phy->iobase + addr + 2); + result[1] = ioread8(phy->iobase + addr + 1); + result[0] = ioread8(phy->iobase + addr); + break; + } return 0; } static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, const u8 *value) + u16 len, const u8 *value, + enum tpm_tis_io_mode io_mode) { struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); - - while (len--) - iowrite8(*value++, phy->iobase + addr); - - return 0; -} - -static int tpm_tis_synquacer_read16_bw(struct tpm_tis_data *data, - u32 addr, u16 *result) -{ - struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); - - /* - * Due to the limitation of SPI controller on SynQuacer, - * 16/32 bits access must be done in byte-wise and descending order. - */ - *result = (ioread8(phy->iobase + addr + 1) << 8) | - (ioread8(phy->iobase + addr)); - - return 0; -} - -static int tpm_tis_synquacer_read32_bw(struct tpm_tis_data *data, - u32 addr, u32 *result) -{ - struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); - - /* - * Due to the limitation of SPI controller on SynQuacer, - * 16/32 bits access must be done in byte-wise and descending order. - */ - *result = (ioread8(phy->iobase + addr + 3) << 24) | - (ioread8(phy->iobase + addr + 2) << 16) | - (ioread8(phy->iobase + addr + 1) << 8) | - (ioread8(phy->iobase + addr)); - - return 0; -} - -static int tpm_tis_synquacer_write32_bw(struct tpm_tis_data *data, - u32 addr, u32 value) -{ - struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); - - /* - * Due to the limitation of SPI controller on SynQuacer, - * 16/32 bits access must be done in byte-wise and descending order. 
- */
-	iowrite8(value >> 24, phy->iobase + addr + 3);
-	iowrite8(value >> 16, phy->iobase + addr + 2);
-	iowrite8(value >> 8, phy->iobase + addr + 1);
-	iowrite8(value, phy->iobase + addr);
+	switch (io_mode) {
+	case TPM_TIS_PHYS_8:
+		while (len--)
+			iowrite8(*value++, phy->iobase + addr);
+		break;
+	case TPM_TIS_PHYS_16:
+		return -EINVAL;
+	case TPM_TIS_PHYS_32:
+		/*
+		 * Due to a limitation of the SPI controller on SynQuacer,
+		 * 16/32-bit accesses must be done byte-wise, in descending order.
+		 */
+		iowrite8(value[3], phy->iobase + addr + 3);
+		iowrite8(value[2], phy->iobase + addr + 2);
+		iowrite8(value[1], phy->iobase + addr + 1);
+		iowrite8(value[0], phy->iobase + addr);
+		break;
+	}
 	return 0;
 }
@@ -108,9 +89,6 @@ static int tpm_tis_synquacer_write32_bw(struct tpm_tis_data *data,
 static const struct tpm_tis_phy_ops tpm_tcg_bw = {
 	.read_bytes	= tpm_tis_synquacer_read_bytes,
 	.write_bytes	= tpm_tis_synquacer_write_bytes,
-	.read16		= tpm_tis_synquacer_read16_bw,
-	.read32		= tpm_tis_synquacer_read32_bw,
-	.write32	= tpm_tis_synquacer_write32_bw,
 };
 
 static int tpm_tis_synquacer_init(struct device *dev,
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 69df04ae2401..379291826261 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -253,20 +253,12 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
 	struct xenbus_transaction xbt;
 	const char *message = NULL;
 	int rv;
-	grant_ref_t gref;
 
-	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (!priv->shr) {
-		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-		return -ENOMEM;
-	}
-
-	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
+	rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1,
+			       &priv->ring_ref);
 	if (rv < 0)
 		return rv;
 
-	priv->ring_ref = gref;
-
 	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
 	if (rv)
 		return rv;
@@ -331,11 +323,7 @@ static void ring_free(struct tpm_private *priv)
 	if (!priv)
 		return;
 
-	if (priv->ring_ref)
-		gnttab_end_foreign_access(priv->ring_ref,
-					  (unsigned long)priv->shr);
-	else
-		free_page((unsigned long)priv->shr);
+	xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref);
 
 	if (priv->irq)
 		unbind_from_irqhandler(priv->irq, priv);
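
The fast_mix() hunk in random.c above replaces the open-coded SipHash rounds with the named SIPHASH_PERMUTATION/HSIPHASH_PERMUTATION macros and feeds exactly two words per call. To make the construction concrete, here is a minimal user-space C sketch of the 64-bit variant; the rotation constants and the xor-in, permute, xor-out pattern come straight from the hunk, while the names ROL64, SIPHASH_PERM_SKETCH, and fast_mix_sketch are local to the sketch, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define ROL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))

/* One SipHash round, with the same rotation constants as the removed loop. */
#define SIPHASH_PERM_SKETCH(a, b, c, d) do { \
	(a) += (b); (b) = ROL64((b), 13); (b) ^= (a); (a) = ROL64((a), 32); \
	(c) += (d); (d) = ROL64((d), 16); (d) ^= (c); \
	(a) += (d); (d) = ROL64((d), 21); (d) ^= (a); \
	(c) += (b); (b) = ROL64((b), 17); (b) ^= (c); (c) = ROL64((c), 32); \
} while (0)

/* SipHash-1-x over two input words, mirroring the new fast_mix(). */
static void fast_mix_sketch(uint64_t s[4], uint64_t v1, uint64_t v2)
{
	s[3] ^= v1;
	SIPHASH_PERM_SKETCH(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	SIPHASH_PERM_SKETCH(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

int main(void)
{
	/* The standard SipHash initial constants, as in the per-CPU pool. */
	uint64_t s[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
			  0x6c7967656e657261ULL, 0x7465646279746573ULL };

	fast_mix_sketch(s, 0x123456789abcdef0ULL, 42);
	printf("%016llx %016llx %016llx %016llx\n",
	       (unsigned long long)s[0], (unsigned long long)s[1],
	       (unsigned long long)s[2], (unsigned long long)s[3]);
	return 0;
}

One permutation round per input word is what makes this SipHash-1-x: mixing is cheap in hard IRQ context, and any finalization strength is deferred to the extraction path.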
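
The new add_timer_randomness() credits bits from the minimum of the absolute first, second, and third-order deltas of event times, rounded down one bit and capped at 11. A stand-alone sketch of just that estimator, assuming jiffies-like integer timestamps and a locally re-implemented fls(), since the kernel helper is unavailable in user space:

#include <stdio.h>

struct timer_rand_state_sketch {
	long last_time, last_delta, last_delta2;
};

/* Position of the highest set bit, 1-based; 0 when no bit is set (like fls()). */
static int fls_sketch(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Estimate credited bits for one event, following add_timer_randomness(). */
static unsigned int estimate_bits(struct timer_rand_state_sketch *s, long now)
{
	long delta = now - s->last_time;
	long delta2 = delta - s->last_delta;
	long delta3 = delta2 - s->last_delta2;
	int bits;

	s->last_time = now;
	s->last_delta = delta;
	s->last_delta2 = delta2;

	if (delta < 0) delta = -delta;
	if (delta2 < 0) delta2 = -delta2;
	if (delta3 < 0) delta3 = -delta3;
	if (delta > delta2) delta = delta2;
	if (delta > delta3) delta = delta3;

	/* Round down by one bit on general principles, cap at 11 bits. */
	bits = fls_sketch((unsigned long)delta >> 1);
	return bits > 11 ? 11 : bits;
}

int main(void)
{
	struct timer_rand_state_sketch s = { 0, 0, 0 };
	long stamps[] = { 100, 230, 420, 1000, 1001 };
	unsigned int i;

	for (i = 0; i < sizeof(stamps) / sizeof(stamps[0]); i++)
		printf("event at %ld credits %u bit(s)\n",
		       stamps[i], estimate_bits(&s, stamps[i]));
	return 0;
}

Note how the back-to-back pair at the end credits zero bits: a small delta in any of the three orders collapses the estimate, which is the intended penalty for regular timings.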
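
The getrandom() path above now funnels through get_random_bytes_user() with an iov_iter, but the user-visible contract is unchanged: block until the crng is ready unless GRND_NONBLOCK or GRND_INSECURE is passed, and reject the GRND_INSECURE|GRND_RANDOM combination with EINVAL. A small caller exercising those flags; it assumes a glibc new enough (2.32+) to expose GRND_INSECURE in <sys/random.h>.

#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];

	/* Non-blocking: fails with EAGAIN if the crng is not yet seeded. */
	if (getrandom(buf, sizeof(buf), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		fprintf(stderr, "crng not ready yet\n");

	/* GRND_INSECURE never blocks, but may return pre-seeding output. */
	if (getrandom(buf, sizeof(buf), GRND_INSECURE) < 0)
		perror("getrandom(GRND_INSECURE)");

	/* The flag combination the syscall rejects, as in the hunk above. */
	if (getrandom(buf, sizeof(buf), GRND_RANDOM | GRND_INSECURE) < 0 &&
	    errno == EINVAL)
		fprintf(stderr, "GRND_RANDOM|GRND_INSECURE -> EINVAL\n");

	return 0;
}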
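
The tpm_tis_phy_ops refactor above collapses the read16/read32/write32 callbacks into read_bytes/write_bytes plus an io_mode argument, with the inline wrappers doing a single call and the endianness conversion. The shape of that dispatch, sketched in user space with a function pointer and a fake little-endian register array standing in for MMIO; none of these names are the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum io_mode_sketch { PHYS_8, PHYS_16, PHYS_32 };

/* Stand-in for tpm_tis_phy_ops.read_bytes: output is little-endian bytes. */
typedef int (*read_bytes_fn)(uint32_t addr, uint16_t len, uint8_t *out,
			     enum io_mode_sketch mode);

/* Fake little-endian register file in place of real MMIO. */
static uint8_t regs[16] = { 0x34, 0x12, 0x78, 0x56 };

static int byte_wise_read(uint32_t addr, uint16_t len, uint8_t *out,
			  enum io_mode_sketch mode)
{
	(void)mode;	/* This backend is byte-wise in every mode. */
	memcpy(out, regs + addr, len);
	return 0;
}

/* Same shape as the new tpm_tis_read16(): one call plus an le16 decode. */
static int read16_sketch(read_bytes_fn rd, uint32_t addr, uint16_t *result)
{
	uint8_t le[2];
	int rc = rd(addr, sizeof(le), le, PHYS_16);

	if (!rc)
		*result = (uint16_t)(le[0] | (le[1] << 8));
	return rc;
}

int main(void)
{
	uint16_t v;

	if (!read16_sketch(byte_wise_read, 0, &v))
		printf("reg0 = 0x%04x\n", v);	/* prints 0x1234 */
	return 0;
}

Keeping the conversion in the wrapper is what lets a backend like the SynQuacer one above stay byte-wise for every mode while MMIO-capable backends use wide accesses.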