
Commit b8e0bed

Merge pull request abetlen#453 from wu-qing-157/main
Fix incorrect token_logprobs (due to indexing after sorting)
2 parents: d6e6aad + 9e61661

1 file changed: +3, -3 lines

llama_cpp/llama.py

Lines changed: 3 additions & 3 deletions
@@ -964,7 +964,7 @@ def _create_completion(
                     )
                 ],
                 "text_offset": [text_offset],
-                "token_logprobs": [sorted_logprobs[int(token)][0]],
+                "token_logprobs": [current_logprobs[int(token)]],
                 "top_logprobs": [top_logprob],
             }
             returned_tokens += 1
@@ -1039,7 +1039,7 @@ def _create_completion(
                     self.detokenize([token]).decode("utf-8", errors="ignore")
                 ],
                 "text_offset": [text_offset],
-                "token_logprobs": [sorted_logprobs[int(token)][0]],
+                "token_logprobs": [current_logprobs[int(token)]],
                 "top_logprobs": [top_logprob],
             }

@@ -1163,7 +1163,7 @@ def _create_completion(
                     zip(logprobs_token, range(len(logprobs_token))), reverse=True
                 )
             )
-            token_logprobs.append(sorted_logprobs[int(token)][0])
+            token_logprobs.append(logprobs_token[int(token)])
             top_logprob: Optional[Dict[str, float]] = {
                 self.detokenize([i]).decode("utf-8", errors="ignore"): logprob
                 for logprob, i in sorted_logprobs[:logprobs]
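
For context, here is a minimal standalone sketch (invented values, not taken from the repository) of why the old indexing was wrong: sorted_logprobs holds (logprob, token_id) pairs sorted by logprob value, so position int(token) in that list no longer corresponds to token id token, whereas the unsorted logprobs_token array is still indexed by token id.

# Minimal sketch with made-up values: why indexing after sorting breaks token_logprobs.
logprobs_token = [-3.2, -0.1, -5.7, -1.4]  # hypothetical logprobs; index == token id
token = 2                                  # the sampled token id

# As in llama.py: (logprob, token_id) pairs sorted by logprob, descending.
sorted_logprobs = list(
    sorted(zip(logprobs_token, range(len(logprobs_token))), reverse=True)
)
# -> [(-0.1, 1), (-1.4, 3), (-3.2, 0), (-5.7, 2)]

old_value = sorted_logprobs[int(token)][0]  # old code: -3.2, the logprob of token 0
new_value = logprobs_token[int(token)]      # fixed code: -5.7, the logprob of token 2
assert old_value != new_value

In the two streaming paths the fix reads from current_logprobs, which (by analogy with logprobs_token in the third hunk) appears to be the unsorted logprob array for the current position.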
