From 466bf4800b75ec29bd2ff75bad8e8973bd98d01c Mon Sep 17 00:00:00 2001
From: Manan Dey
Date: Sat, 30 Apr 2022 11:43:51 +0530
Subject: [PATCH] update docs of length_penalty

---
 src/transformers/generation_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py
index 76086c4b7d6330..f66bbf84b0cb33 100644
--- a/src/transformers/generation_utils.py
+++ b/src/transformers/generation_utils.py
@@ -950,9 +950,9 @@ def generate(
             eos_token_id (`int`, *optional*):
                 The id of the *end-of-sequence* token.
             length_penalty (`float`, *optional*, defaults to 1.0):
-                Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the
-                model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer
-                sequences.
+                Exponential penalty to the length. 1.0 means that the beam score is divided by the sequence length,
+                while 0.0 means no penalty. Set to values < 0.0 in order to encourage the model to generate shorter
+                sequences, to a value > 0.0 in order to encourage the model to produce longer sequences.
             no_repeat_ngram_size (`int`, *optional*, defaults to 0):
                 If set to int > 0, all ngrams of that size can only occur once.
             encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
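
Note on the new wording: below is a minimal sketch of the behaviour being documented, assuming the beam score is
computed as `sum_logprobs / (length ** length_penalty)` (as in `BeamHypotheses.add`); the helper function and the
numbers are illustrative only, not library code.

    # Illustrative sketch: length normalization applied to a finished beam hypothesis.
    def beam_score(sum_logprobs: float, length: int, length_penalty: float) -> float:
        # sum_logprobs is a sum of log-probabilities, so it is always <= 0.
        return sum_logprobs / (length ** length_penalty)

    # Two hypotheses with the same average log-probability per token (-0.5):
    print(beam_score(-5.0, 10, 0.0))    # -5.0   (no penalty: raw sum of log-probs)
    print(beam_score(-5.0, 10, 2.0))    # -0.05
    print(beam_score(-10.0, 20, 2.0))   # -0.025 (higher score: length_penalty > 0.0 favors the longer hypothesis)
    print(beam_score(-5.0, 10, -1.0))   # -50.0
    print(beam_score(-10.0, 20, -1.0))  # -200.0 (lower score: length_penalty < 0.0 favors the shorter hypothesis)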