reverse window

This commit is contained in:
parent 150c499cae
commit 698a92aa4a
207 llama/patches/0017-add-ollama-vocab-for-grammar-support.patch Normal file
@@ -0,0 +1,207 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: ParthSareen <parth.sareen@ollama.com>
Date: Mon, 21 Apr 2025 13:30:31 -0700
Subject: [PATCH] add ollama vocab for grammar support

---
 src/llama-grammar.cpp  | 49 ++++++++++++++++++++++++++++++++++++------
 src/llama-grammar.h    | 14 ++++++++++++
 src/llama-sampling.cpp |  4 ++--
 3 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp
index 973b47ae..60d58236 100644
--- a/src/llama-grammar.cpp
+++ b/src/llama-grammar.cpp
@@ -907,6 +907,7 @@ llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const llama_grammar_element ** rules,
         size_t n_rules,
         size_t start_rule_index) {
@@ -962,6 +963,7 @@ struct llama_grammar * llama_grammar_init_impl(
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
     return new llama_grammar {
         vocab,
+        ollama_vocab,
         std::move(vec_rules),
         std::move(stacks),
         /* .partial_utf8 = */ {},
@@ -975,6 +977,7 @@ struct llama_grammar * llama_grammar_init_impl(
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
@@ -1067,6 +1070,7 @@ struct llama_grammar * llama_grammar_init_impl(
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
     return new llama_grammar {
         vocab,
+        ollama_vocab,
         std::move(vec_rules),
         std::move(stacks),
         /* .partial_utf8 = */ {},
@@ -1089,6 +1093,7 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
 struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
     auto * result = new llama_grammar {
         grammar.vocab,
+        grammar.o_vocab,
         grammar.rules,
         grammar.stacks,
         grammar.partial_utf8,
@@ -1116,7 +1121,6 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
 }
 
 void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) {
-    GGML_ASSERT(grammar.vocab != nullptr);
 
     if (grammar.awaiting_trigger) {
         return;
@@ -1138,9 +1142,13 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
 
     for (size_t i = 0; i < cur_p->size; ++i) {
         const llama_token id = cur_p->data[i].id;
-        const std::string & piece = grammar.vocab->token_to_piece(id);
+        const std::string piece = grammar.o_vocab ?
+            grammar.o_vocab->token_to_piece(id) :
+            grammar.vocab->token_to_piece(id);
 
-        if (grammar.vocab->is_eog(id)) {
+        const bool is_eog = grammar.o_vocab ? grammar.o_vocab->is_eog(id) : grammar.vocab->is_eog(id);
+
+        if (is_eog) {
             if (!allow_eog) {
                 cur_p->data[i].logit = -INFINITY;
             }
@@ -1159,9 +1167,10 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
 }
 
 void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) {
-    GGML_ASSERT(grammar.vocab != nullptr);
 
-    const auto & piece = grammar.vocab->token_to_piece(token);
+    const std::string piece = grammar.o_vocab ?
+        grammar.o_vocab->token_to_piece(token) :
+        grammar.vocab->token_to_piece(token);
 
     if (grammar.awaiting_trigger) {
         if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) {
@@ -1191,13 +1200,14 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
         }
     }
 
-    if (grammar.vocab->is_eog(token)) {
+    const bool is_eog = grammar.o_vocab ? grammar.o_vocab->is_eog(token) : grammar.vocab->is_eog(token);
+    if (is_eog) {
         for (const auto & stack : grammar.stacks) {
             if (stack.empty()) {
                 return;
             }
         }
-        GGML_ABORT("fatal error");
+        GGML_ABORT("grammar error: end of grammar token received but grammar stack is not empty");
     }
 
     llama_grammar_accept_str(grammar, piece);
@@ -1217,3 +1227,28 @@ void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string
         throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece);
     }
 }
+
+
+const std::string & ollama_vocab::token_to_piece(const uint32_t token) const {
+    try {
+        return token_to_piece_map.at(token);
+    } catch (const std::out_of_range&) {
+        throw std::runtime_error("Token not found in vocabulary: " + std::to_string(token));
+    }
+}
+
+void ollama_vocab::add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces) {
+    for (size_t i = 0; i < n_tokens; i++) {
+        token_to_piece_map[tokens[i]] = pieces[i];
+    }
+}
+
+bool ollama_vocab::is_eog(const uint32_t token) const {
+    return special_eog_ids.count(token) > 0;
+}
+
+void ollama_vocab::set_eog_tokens(const uint32_t* tokens, size_t n_tokens) {
+    for (size_t i = 0; i < n_tokens; i++) {
+        special_eog_ids.insert(tokens[i]);
+    }
+}
diff --git a/src/llama-grammar.h b/src/llama-grammar.h
index f8c291de..2a3a62db 100644
--- a/src/llama-grammar.h
+++ b/src/llama-grammar.h
@@ -6,8 +6,19 @@
 #include <regex>
 #include <string>
 #include <vector>
+#include <set>
 
 struct llama_vocab;
+struct ollama_vocab {
+    std::map<uint32_t, std::string> token_to_piece_map;
+    std::set<uint32_t> special_eog_ids;
+
+    const std::string & token_to_piece(const uint32_t token) const;
+    void add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces);
+    void set_eog_tokens(const uint32_t* tokens, size_t n_tokens);
+    bool is_eog(const uint32_t token) const;
+
+};
 
 // grammar element type
 enum llama_gretype {
@@ -114,6 +125,7 @@ struct llama_grammar_trigger_pattern {
 struct llama_grammar {
     // note: allow null vocab for testing (not great)
     const llama_vocab * vocab;
+    const ollama_vocab * o_vocab;
 
     const llama_grammar_rules rules; // TODO: shared ptr
     llama_grammar_stacks stacks;
@@ -141,12 +153,14 @@ struct llama_grammar {
 // note: needed for tests (not great)
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const llama_grammar_element ** rules,
         size_t n_rules,
         size_t start_rule_index);
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index c0a5f934..75731053 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1466,7 +1466,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
         trigger_patterns_c.push_back(trigger_pattern.pattern.c_str());
     }
 
-    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
+    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, nullptr, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
             ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(),
             ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
 
@@ -1548,7 +1548,7 @@ static struct llama_sampler * llama_sampler_init_grammar_impl(
         /* .vocab = */ vocab,
         /* .grammar_str = */ grammar_str,
         /* .grammar_root = */ grammar_root,
-        /* .grammar = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens),
+        /* .grammar = */ llama_grammar_init_impl(vocab, nullptr, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens),
     };
     if (!ctx->grammar) {
         delete ctx;
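The ollama_vocab type introduced above is just a token-id-to-piece map plus a set of end-of-generation ids; the patched grammar code consults o_vocab when it is non-null and falls back to the regular llama vocab otherwise. Below is a minimal standalone C++ sketch of how the struct behaves, with the struct body inlined here and hypothetical token ids and pieces (it mirrors the patch, it is not a drop-in for it):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <set>
    #include <stdexcept>
    #include <string>

    // Mirrors the ollama_vocab added to src/llama-grammar.h above.
    struct ollama_vocab {
        std::map<uint32_t, std::string> token_to_piece_map;
        std::set<uint32_t> special_eog_ids;

        void add_token_pieces(const uint32_t * tokens, size_t n_tokens, const char ** pieces) {
            for (size_t i = 0; i < n_tokens; i++) {
                token_to_piece_map[tokens[i]] = pieces[i];
            }
        }
        void set_eog_tokens(const uint32_t * tokens, size_t n_tokens) {
            for (size_t i = 0; i < n_tokens; i++) {
                special_eog_ids.insert(tokens[i]);
            }
        }
        const std::string & token_to_piece(uint32_t token) const {
            try {
                return token_to_piece_map.at(token);
            } catch (const std::out_of_range &) {
                throw std::runtime_error("Token not found in vocabulary: " + std::to_string(token));
            }
        }
        bool is_eog(uint32_t token) const {
            return special_eog_ids.count(token) > 0;
        }
    };

    int main() {
        ollama_vocab vocab;

        // Hypothetical token ids and pieces supplied by the caller.
        const uint32_t ids[]    = { 1, 2, 3 };
        const char *   pieces[] = { "Hello", " world", "</s>" };
        vocab.add_token_pieces(ids, 3, pieces);

        const uint32_t eog[] = { 3 };
        vocab.set_eog_tokens(eog, 1);

        std::cout << vocab.token_to_piece(2) << "\n";           // " world"
        std::cout << std::boolalpha << vocab.is_eog(3) << "\n"; // true
    }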
70 llama/patches/0018-add-argsort-for-int32_t.patch Normal file
@@ -0,0 +1,70 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Thu, 1 May 2025 13:45:12 -0700
Subject: [PATCH] add argsort for int32_t

---
 ggml/src/ggml-cpu/ops.cpp | 43 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
index 66b8da68..1ad571d3 100644
--- a/ggml/src/ggml-cpu/ops.cpp
+++ b/ggml/src/ggml-cpu/ops.cpp
@@ -6718,6 +6718,45 @@ static void ggml_compute_forward_argsort_f32(
     }
 }
 
+static void ggml_compute_forward_argsort_i32(
+        const ggml_compute_params * params,
+        ggml_tensor * dst) {
+
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(int32_t));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0);
+
+    for (int64_t i = ith; i < nr; i += nth) {
+        int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
+        const int32_t * src_data = (int32_t *)((char *) src0->data + i*nb01);
+
+        for (int64_t j = 0; j < ne0; j++) {
+            dst_data[j] = j;
+        }
+
+        // C doesn't have a functional sort, so we do a bubble sort instead
+        for (int64_t j = 0; j < ne0; j++) {
+            for (int64_t k = j + 1; k < ne0; k++) {
+                if ((order == GGML_SORT_ORDER_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
+                    (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
+                    int32_t tmp = dst_data[j];
+                    dst_data[j] = dst_data[k];
+                    dst_data[k] = tmp;
+                }
+            }
+        }
+    }
+}
+
 void ggml_compute_forward_argsort(
         const ggml_compute_params * params,
         ggml_tensor * dst) {
@@ -6729,6 +6768,10 @@ void ggml_compute_forward_argsort(
             {
                 ggml_compute_forward_argsort_f32(params, dst);
             } break;
+        case GGML_TYPE_I32:
+            {
+                ggml_compute_forward_argsort_i32(params, dst);
+            } break;
         default:
             {
                 GGML_ABORT("fatal error");
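For a given row, the kernel added above fills the output with indices into that row, ordered so the referenced int32 values ascend (GGML_SORT_ORDER_ASC) or descend (GGML_SORT_ORDER_DESC). A small illustrative C++ sketch of that per-row behavior, using std::stable_sort in place of the kernel's bubble sort (argsort_i32_row is a made-up helper name, not a ggml function):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Same per-row result as ggml_compute_forward_argsort_i32 (illustrative only).
    static std::vector<int32_t> argsort_i32_row(const std::vector<int32_t> & src, bool ascending) {
        std::vector<int32_t> idx(src.size());
        std::iota(idx.begin(), idx.end(), 0);            // start from the identity permutation
        std::stable_sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
            return ascending ? src[a] < src[b] : src[a] > src[b];
        });
        return idx;                                      // src[idx[0]] <= src[idx[1]] <= ... when ascending
    }

    int main() {
        const std::vector<int32_t> src = { 30, 10, 20 };
        assert((argsort_i32_row(src, true) == std::vector<int32_t>{ 1, 2, 0 }));  // 10, 20, 30
    }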
@@ -225,6 +225,7 @@ type Tensor interface {
    Duplicate(ctx Context) Tensor
 
    TopK(ctx Context, k int) Tensor
+   Argsort(ctx Context) Tensor
 }
 
 // ScaledDotProductAttention implements a fused attention
@@ -1256,3 +1256,10 @@ func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
        t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
    }
 }
+
+func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
+   return &Tensor{
+       b: t.b,
+       t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
+   }
+}
43 ml/backend/ggml/ggml/src/ggml-cpu/ops.cpp vendored
@@ -6877,6 +6877,45 @@ static void ggml_compute_forward_argsort_f32(
     }
 }
 
+static void ggml_compute_forward_argsort_i32(
+        const ggml_compute_params * params,
+        ggml_tensor * dst) {
+
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(int32_t));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0);
+
+    for (int64_t i = ith; i < nr; i += nth) {
+        int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
+        const int32_t * src_data = (int32_t *)((char *) src0->data + i*nb01);
+
+        for (int64_t j = 0; j < ne0; j++) {
+            dst_data[j] = j;
+        }
+
+        // C doesn't have a functional sort, so we do a bubble sort instead
+        for (int64_t j = 0; j < ne0; j++) {
+            for (int64_t k = j + 1; k < ne0; k++) {
+                if ((order == GGML_SORT_ORDER_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
+                    (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
+                    int32_t tmp = dst_data[j];
+                    dst_data[j] = dst_data[k];
+                    dst_data[k] = tmp;
+                }
+            }
+        }
+    }
+}
+
 void ggml_compute_forward_argsort(
         const ggml_compute_params * params,
         ggml_tensor * dst) {
@@ -6888,6 +6927,10 @@ void ggml_compute_forward_argsort(
             {
                 ggml_compute_forward_argsort_f32(params, dst);
             } break;
+        case GGML_TYPE_I32:
+            {
+                ggml_compute_forward_argsort_i32(params, dst);
+            } break;
         default:
             {
                 GGML_ABORT("fatal error");
@@ -245,7 +245,9 @@ func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor, grid *Grid)
        }
    }
 
-   return m.PatchMerger.Forward(ctx, hiddenStates, m.VisionModelOptions)
+   hiddenStates = m.PatchMerger.Forward(ctx, hiddenStates, m.VisionModelOptions)
+   reverseWindowIndex := windowIndex.Argsort(ctx)
+   return hiddenStates.Rows(ctx, reverseWindowIndex)
 }
 
 // windowIndex divides the grid into windows and returns:
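This change is where the commit title comes from. Assuming hiddenStates rows were gathered into window order with windowIndex earlier in Forward, windowIndex is a permutation of row indices; argsorting a permutation yields its inverse, so Rows(ctx, reverseWindowIndex) puts the rows back into their pre-window order. An illustrative standalone C++ sketch of that identity (not the Go/ggml code itself):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
        // The order in which original rows were gathered into windows.
        const std::vector<int32_t> windowIndex = { 2, 0, 3, 1 };

        // Argsort of a permutation is its inverse permutation.
        std::vector<int32_t> reverseWindowIndex(windowIndex.size());
        std::iota(reverseWindowIndex.begin(), reverseWindowIndex.end(), 0);
        std::sort(reverseWindowIndex.begin(), reverseWindowIndex.end(),
                  [&](int32_t a, int32_t b) { return windowIndex[a] < windowIndex[b]; });

        // Gathering with windowIndex and then with its argsort restores the original order.
        const std::vector<int32_t> rows = { 0, 1, 2, 3 };  // stand-in for hidden-state rows
        std::vector<int32_t> shuffled(rows.size()), restored(rows.size());
        for (size_t i = 0; i < rows.size(); i++) shuffled[i] = rows[windowIndex[i]];
        for (size_t i = 0; i < rows.size(); i++) restored[i] = shuffled[reverseWindowIndex[i]];
        assert(restored == rows);
    }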