Skip to content

Commit 1183f72

Browse files
v0.12.12 (run-llama#17561)
1 parent 637c79d commit 1183f72

File tree

11 files changed

+160
-73
lines changed

11 files changed

+160
-73
lines changed

CHANGELOG.md

+35
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,40 @@
11
# ChangeLog
22

3+
## [2025-01-20]
4+
5+
### `llama-index-core` [0.12.12]
6+
7+
- feat: add AgentWorkflow system to support single and multi-agent workflows (#17237)
8+
- Fix image-path validation in ImageNode (#17558)
9+
10+
### `llama-index-indices-managed-vectara` [0.4.0]
11+
12+
- (breaking change) API Migration (#17545)
13+
14+
### `llama-index-llms-anthropic` [0.6.4]
15+
16+
- feat: support direct PDF handling for Anthropic (#17506)
17+
18+
### `llama-index-llms-fireworks` [0.3.1]
19+
20+
- Deepseek-v3 is now supported by fireworks (#17518)
21+
22+
### `llama-index-llms-stepfun` [1.0.0]
23+
24+
- feat: add stepfun integrations (#17514)
25+
26+
### `llama-index-multi-modal-llms-gemini` [0.5.0]
27+
28+
- refactor: make GeminiMultiModal a thin wrapper around Gemini (#17501)
29+
30+
### `llama-index-postprocessor-longllmlingua` [0.4.0]
31+
32+
- Add longllmlingua2 integration (#17531)
33+
34+
### `llama-index-readers-web` [0.3.4]
35+
36+
- feat: Hyperbrowser Web Reader (#17489)
37+
338
## [2025-01-15]
439

540
### `llama-index-core` [0.12.11]

docs/docs/CHANGELOG.md

+35
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,40 @@
11
# ChangeLog
22

3+
## [2025-01-20]
4+
5+
### `llama-index-core` [0.12.12]
6+
7+
- feat: add AgentWorkflow system to support single and multi-agent workflows (#17237)
8+
- Fix image-path validation in ImageNode (#17558)
9+
10+
### `llama-index-indices-managed-vectara` [0.4.0]
11+
12+
- (breaking change) API Migration (#17545)
13+
14+
### `llama-index-llms-anthropic` [0.6.4]
15+
16+
- feat: support direct PDF handling for Anthropic (#17506)
17+
18+
### `llama-index-llms-fireworks` [0.3.1]
19+
20+
- Deepseek-v3 is now supported by fireworks (#17518)
21+
22+
### `llama-index-llms-stepfun` [1.0.0]
23+
24+
- feat: add stepfun integrations (#17514)
25+
26+
### `llama-index-multi-modal-llms-gemini` [0.5.0]
27+
28+
- refactor: make GeminiMultiModal a thin wrapper around Gemini (#17501)
29+
30+
### `llama-index-postprocessor-longllmlingua` [0.4.0]
31+
32+
- Add longllmlingua2 integration (#17531)
33+
34+
### `llama-index-readers-web` [0.3.4]
35+
36+
- feat: Hyperbrowser Web Reader (#17489)
37+
338
## [2025-01-15]
439

540
### `llama-index-core` [0.12.11]

llama-index-core/llama_index/core/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
"""Init file of LlamaIndex."""
22

3-
__version__ = "0.12.11"
3+
__version__ = "0.12.12"
44

55
import logging
66
from logging import NullHandler

llama-index-core/pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ name = "llama-index-core"
4646
packages = [{include = "llama_index"}]
4747
readme = "README.md"
4848
repository = "https://github.com/run-llama/llama_index"
49-
version = "0.12.11"
49+
version = "0.12.12"
5050

5151
[tool.poetry.dependencies]
5252
SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}

llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml

+2-2
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-llms-gemini"
2929
readme = "README.md"
30-
version = "0.4.3"
30+
version = "0.4.4"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<4.0"
3434
pillow = "^10.2.0"
3535
google-generativeai = ">=0.5.2"
36-
llama-index-core = "^0.12.0"
36+
llama-index-core = "^0.12.12"
3737

3838
[tool.poetry.group.dev.dependencies]
3939
ipython = "8.10.0"

llama-index-integrations/llms/llama-index-llms-ibm/pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ readme = "README.md"
3434
version = "0.3.1"
3535

3636
[tool.poetry.dependencies]
37-
python = ">=3.10,<4.0"
37+
python = ">=3.10,<3.13"
3838
ibm-watsonx-ai = ">=1.1.24"
3939
pyarrow = "*"
4040
llama-index-core = "^0.12.0"

llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml

+3-3
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-multi-modal-llms-gemini"
2929
readme = "README.md"
30-
version = "0.4.1"
30+
version = "0.5.0"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<4.0"
34-
llama-index-llms-gemini = "^0.4.0"
34+
llama-index-llms-gemini = "^0.4.4"
3535
pillow = "^10.2.0"
36-
llama-index-core = "^0.12.0"
36+
llama-index-core = "^0.12.12"
3737

3838
[tool.poetry.group.dev.dependencies]
3939
ipython = "8.10.0"

llama-index-integrations/retrievers/llama-index-retrievers-bm25/llama_index/retrievers/bm25/base.py

+19-3
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333

3434

3535
class BM25Retriever(BaseRetriever):
36-
"""A BM25 retriever that uses the BM25 algorithm to retrieve nodes.
36+
r"""A BM25 retriever that uses the BM25 algorithm to retrieve nodes.
3737
3838
Args:
3939
nodes (List[BaseNode], optional):
@@ -52,6 +52,10 @@ class BM25Retriever(BaseRetriever):
5252
The objects to retrieve. Defaults to None.
5353
object_map (dict, optional):
5454
A map of object IDs to nodes. Defaults to None.
55+
token_pattern (str, optional):
56+
The token pattern to use. Defaults to (?u)\\b\\w\\w+\\b.
57+
skip_stemming (bool, optional):
58+
Whether to skip stemming. Defaults to False.
5559
verbose (bool, optional):
5660
Whether to show progress. Defaults to False.
5761
"""
@@ -67,9 +71,13 @@ def __init__(
6771
objects: Optional[List[IndexNode]] = None,
6872
object_map: Optional[dict] = None,
6973
verbose: bool = False,
74+
skip_stemming: bool = False,
75+
token_pattern: str = r"(?u)\b\w\w+\b",
7076
) -> None:
7177
self.stemmer = stemmer or Stemmer.Stemmer("english")
7278
self.similarity_top_k = similarity_top_k
79+
self.token_pattern = token_pattern
80+
self.skip_stemming = skip_stemming
7381

7482
if existing_bm25 is not None:
7583
self.bm25 = existing_bm25
@@ -83,7 +91,8 @@ def __init__(
8391
corpus_tokens = bm25s.tokenize(
8492
[node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
8593
stopwords=language,
86-
stemmer=self.stemmer,
94+
stemmer=self.stemmer if not skip_stemming else None,
95+
token_pattern=self.token_pattern,
8796
show_progress=verbose,
8897
)
8998
self.bm25 = bm25s.BM25()
@@ -105,6 +114,8 @@ def from_defaults(
105114
language: str = "en",
106115
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
107116
verbose: bool = False,
117+
skip_stemming: bool = False,
118+
token_pattern: str = r"(?u)\b\w\w+\b",
108119
# deprecated
109120
tokenizer: Optional[Callable[[str], List[str]]] = None,
110121
) -> "BM25Retriever":
@@ -134,6 +145,8 @@ def from_defaults(
134145
language=language,
135146
similarity_top_k=similarity_top_k,
136147
verbose=verbose,
148+
skip_stemming=skip_stemming,
149+
token_pattern=token_pattern,
137150
)
138151

139152
def get_persist_args(self) -> Dict[str, Any]:
@@ -161,7 +174,10 @@ def from_persist_dir(cls, path: str, **kwargs: Any) -> "BM25Retriever":
161174
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
162175
query = query_bundle.query_str
163176
tokenized_query = bm25s.tokenize(
164-
query, stemmer=self.stemmer, show_progress=self._verbose
177+
query,
178+
stemmer=self.stemmer if not self.skip_stemming else None,
179+
token_pattern=self.token_pattern,
180+
show_progress=self._verbose,
165181
)
166182
indexes, scores = self.bm25.retrieve(
167183
tokenized_query, k=self.similarity_top_k, show_progress=self._verbose

llama-index-integrations/retrievers/llama-index-retrievers-bm25/pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-retrievers-bm25"
2929
readme = "README.md"
30-
version = "0.5.0"
30+
version = "0.5.1"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<4.0"

0 commit comments

Comments
 (0)