Skip to content

Commit 6d770ae

Browse files
v0.12.6 (run-llama#17305)
1 parent b24e6ea commit 6d770ae

File tree

11 files changed

+269
-77
lines changed

11 files changed

+269
-77
lines changed

CHANGELOG.md

+94
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,95 @@
11
# ChangeLog
22

3+
## [2024-12-17]
4+
5+
### `llama-index-core` [0.12.6]
6+
7+
- [bug fix] Ensure that StopEvent gets cleared from Context._in_progress["_done"] after a Workflow run (#17300)
8+
- fix: add a timeout to langchain callback handler (#17296)
9+
- tweak User vs tool in react prompts (#17273)
10+
- refact: Refactor Document to be natively multimodal (#17204)
11+
- fix: make ImageDocument derive from Document, backward compatible (#17259)
12+
- fix: accept already base64-encoded data in ImageBlock (#17244)
13+
- fix(metrics): fixed NDCG calculation and updated previous tests (#17236)
14+
- fix: remove llama-index-legacy dependency in llama-index-core (#17231)
15+
- Refined the default documentation generation for function tools (#17208)
16+
17+
### `llama-index-embeddings-voyageai` [0.3.3]
18+
19+
- add support for voyageai >=0.3.0 (#17120)
20+
- Introducing VoyageAI's new multimodal embeddings model (#17261)
21+
- VoyageAI multimodal embedding, correction (#17284)
22+
23+
### `llama-index-experimental` [0.5.2]
24+
25+
- Fixed import errors for experimental JSONalyzeQueryEngine (#17228)
26+
27+
### `llama-index-graph-stores-neo4j` [0.4.4]
28+
29+
- Add cypher corrector and allow graph schema filtering (#17223)
30+
- Add timeout config to neo4j graph (#17267)
31+
- Add text and embedding type to neo4j enhanced schema (#17289)
32+
33+
### `llama-index-llms-anthropic` [0.6.3]
34+
35+
- add content blocks to anthropic (#17274)
36+
- Do not send blank content to anthropic (#17278)
37+
- Update anthropic type imports for v0.41.0 release (#17299)
38+
- Fix Anthropic tokenizer protocol (fix by Devin) (#17201)
39+
40+
### `llama-index-llms-bedrock` [0.3.3]
41+
42+
- Add Amazon bedrock guardrails (#17281)
43+
44+
### `llama-index-llms-bedrock-converse` [0.4.2]
45+
46+
- Add Amazon bedrock guardrails (#17281)
47+
48+
### `llama-index-llms-gemini` [0.4.1]
49+
50+
- Gemini 2.0 support (#17249)
51+
52+
### `llama-index-llms-mistralai` [0.3.1]
53+
54+
- add tool call id/name to mistral chat messages (#17280)
55+
56+
### `llama-index-llms-nvidia` [0.3.1]
57+
58+
- Adding llama 3.3-70b as function-calling-capable (#17253)
59+
60+
### `llama-index-llms-openai` [0.3.10]
61+
62+
- fix openai message dicts for tool calls (#17254)
63+
64+
### `llama-index-llms-text-generation-inference` [0.3.1]
65+
66+
- Fix: TGI context window (#17252)
67+
68+
### `llama-index-multi-modal-llms-anthropic` [0.3.1]
69+
70+
- handle more response types in anthropic multi modal llms (#17302)
71+
72+
### `llama-index-readers-confluence` [0.3.1]
73+
74+
- Support Confluence cookies (#17276)
75+
76+
### `llama-index-vector-stores-milvus` [0.4.0]
77+
78+
- Parse "milvus_search_config" out of "vector_store_kwargs" (#17221)
79+
- refactor and optimize milvus code (#17229)
80+
81+
### `llama-index-vector-stores-pinecone` [0.4.2]
82+
83+
- Handle empty retrieved Pinecone index values (#17242)
84+
85+
### `llama-index-vector-stores-qdrant` [0.4.1]
86+
87+
- feat: Add NOT filter condition to MetadataFilter and QdrantVectorStore (#17270)
88+
89+
### `llama-index-vector-stores-weaviate` [1.3.0]
90+
91+
- Add async support to weaviate vector store integration (#17220)
92+
393
## [2024-12-09]
494

595
### `llama-index-core` [0.12.5]
@@ -14,6 +104,10 @@
14104

15105
- feat: integration on pinecone hosted rerankers (#17192)
16106

107+
### `llama-index-tools-scrapegraph` [0.1.0]
108+
109+
- Add Scrapegraph tool integration (#17238)
110+
17111
### `llama-index-vector-stores-postgres` [0.3.3]
18112

19113
- Update pgvector dependency to version 0.3.6 (#17195)

docs/docs/CHANGELOG.md

+94
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,95 @@
11
# ChangeLog
22

3+
## [2024-12-17]
4+
5+
### `llama-index-core` [0.12.6]
6+
7+
- [bug fix] Ensure that StopEvent gets cleared from Context._in_progress["_done"] after a Workflow run (#17300)
8+
- fix: add a timeout to langchain callback handler (#17296)
9+
- tweak User vs tool in react prompts (#17273)
10+
- refact: Refactor Document to be natively multimodal (#17204)
11+
- fix: make ImageDocument derive from Document, backward compatible (#17259)
12+
- fix: accept already base64-encoded data in ImageBlock (#17244)
13+
- fix(metrics): fixed NDCG calculation and updated previous tests (#17236)
14+
- fix: remove llama-index-legacy dependency in llama-index-core (#17231)
15+
- Refined the default documentation generation for function tools (#17208)
16+
17+
### `llama-index-embeddings-voyageai` [0.3.3]
18+
19+
- add support for voyageai >=0.3.0 (#17120)
20+
- Introducing VoyageAI's new multimodal embeddings model (#17261)
21+
- VoyageAI multimodal embedding, correction (#17284)
22+
23+
### `llama-index-experimental` [0.5.2]
24+
25+
- Fixed import errors for experimental JSONalyzeQueryEngine (#17228)
26+
27+
### `llama-index-graph-stores-neo4j` [0.4.4]
28+
29+
- Add cypher corrector and allow graph schema filtering (#17223)
30+
- Add timeout config to neo4j graph (#17267)
31+
- Add text and embedding type to neo4j enhanced schema (#17289)
32+
33+
### `llama-index-llms-anthropic` [0.6.3]
34+
35+
- add content blocks to anthropic (#17274)
36+
- Do not send blank content to anthropic (#17278)
37+
- Update anthropic type imports for v0.41.0 release (#17299)
38+
- Fix Anthropic tokenizer protocol (fix by Devin) (#17201)
39+
40+
### `llama-index-llms-bedrock` [0.3.3]
41+
42+
- Add Amazon bedrock guardrails (#17281)
43+
44+
### `llama-index-llms-bedrock-converse` [0.4.2]
45+
46+
- Add Amazon bedrock guardrails (#17281)
47+
48+
### `llama-index-llms-gemini` [0.4.1]
49+
50+
- Gemini 2.0 support (#17249)
51+
52+
### `llama-index-llms-mistralai` [0.3.1]
53+
54+
- add tool call id/name to mistral chat messages (#17280)
55+
56+
### `llama-index-llms-nvidia` [0.3.1]
57+
58+
- Adding llama 3.3-70b as function-calling-capable (#17253)
59+
60+
### `llama-index-llms-openai` [0.3.10]
61+
62+
- fix openai message dicts for tool calls (#17254)
63+
64+
### `llama-index-llms-text-generation-inference` [0.3.1]
65+
66+
- Fix: TGI context window (#17252)
67+
68+
### `llama-index-multi-modal-llms-anthropic` [0.3.1]
69+
70+
- handle more response types in anthropic multi modal llms (#17302)
71+
72+
### `llama-index-readers-confluence` [0.3.1]
73+
74+
- Support Confluence cookies (#17276)
75+
76+
### `llama-index-vector-stores-milvus` [0.4.0]
77+
78+
- Parse "milvus_search_config" out of "vector_store_kwargs" (#17221)
79+
- refactor and optimize milvus code (#17229)
80+
81+
### `llama-index-vector-stores-pinecone` [0.4.2]
82+
83+
- Handle empty retrieved Pinecone index values (#17242)
84+
85+
### `llama-index-vector-stores-qdrant` [0.4.1]
86+
87+
- feat: Add NOT filter condition to MetadataFilter and QdrantVectorStore (#17270)
88+
89+
### `llama-index-vector-stores-weaviate` [1.3.0]
90+
91+
- Add async support to weaviate vector store integration (#17220)
92+
393
## [2024-12-09]
494

595
### `llama-index-core` [0.12.5]
@@ -14,6 +104,10 @@
14104

15105
- feat: integration on pinecone hosted rerankers (#17192)
16106

107+
### `llama-index-tools-scrapegraph` [0.1.0]
108+
109+
- Add Scrapegraph tool integration (#17238)
110+
17111
### `llama-index-vector-stores-postgres` [0.3.3]
18112

19113
- Update pgvector dependency to version 0.3.6 (#17195)
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
::: llama_index.tools.scrapegraph
2+
options:
3+
members:
4+
- ScrapegraphToolSpec

docs/mkdocs.yml

+4-4
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,7 @@ nav:
151151
- ./examples/cookbooks/contextual_retrieval.ipynb
152152
- ./examples/cookbooks/crewai_llamaindex.ipynb
153153
- ./examples/cookbooks/llama3_cookbook.ipynb
154+
- ./examples/cookbooks/llama3_cookbook_gaudi.ipynb
154155
- ./examples/cookbooks/llama3_cookbook_groq.ipynb
155156
- ./examples/cookbooks/llama3_cookbook_ollama_replicate.ipynb
156157
- ./examples/cookbooks/mistralai.ipynb
@@ -256,7 +257,6 @@ nav:
256257
- ./examples/embeddings/nomic.ipynb
257258
- ./examples/embeddings/nvidia.ipynb
258259
- ./examples/embeddings/oci_genai.ipynb
259-
- ./examples/embeddings/octoai.ipynb
260260
- ./examples/embeddings/ollama_embedding.ipynb
261261
- ./examples/embeddings/openvino.ipynb
262262
- ./examples/embeddings/optimum_intel.ipynb
@@ -312,6 +312,7 @@ nav:
312312
- ./examples/ingestion/document_management_pipeline.ipynb
313313
- ./examples/ingestion/ingestion_gdrive.ipynb
314314
- ./examples/ingestion/parallel_execution_ingestion_pipeline.ipynb
315+
- ./examples/ingestion/redis_ingestion_pipeline.ipynb
315316
- LLMs:
316317
- ./examples/llm/ai21.ipynb
317318
- ./examples/llm/alephalpha.ipynb
@@ -384,9 +385,7 @@ nav:
384385
- ./examples/llm/rungpt.ipynb
385386
- ./examples/llm/sagemaker_endpoint_llm.ipynb
386387
- ./examples/llm/sambanovasystems.ipynb
387-
- ./examples/llm/solar.ipynb
388388
- ./examples/llm/together.ipynb
389-
- ./examples/llm/unify.ipynb
390389
- ./examples/llm/upstage.ipynb
391390
- ./examples/llm/vertex.ipynb
392391
- ./examples/llm/vicuna.ipynb
@@ -642,7 +641,6 @@ nav:
642641
- ./examples/vector_stores/LanternAutoRetriever.ipynb
643642
- ./examples/vector_stores/LanternIndexDemo.ipynb
644643
- ./examples/vector_stores/LindormDemo.ipynb
645-
- ./examples/vector_stores/MetalIndexDemo.ipynb
646644
- ./examples/vector_stores/MilvusHybridIndexDemo.ipynb
647645
- ./examples/vector_stores/MilvusIndexDemo.ipynb
648646
- ./examples/vector_stores/MilvusOperatorFunctionDemo.ipynb
@@ -1647,6 +1645,7 @@ nav:
16471645
- ./api_reference/tools/requests.md
16481646
- ./api_reference/tools/retriever.md
16491647
- ./api_reference/tools/salesforce.md
1648+
- ./api_reference/tools/scrapegraph.md
16501649
- ./api_reference/tools/shopify.md
16511650
- ./api_reference/tools/slack.md
16521651
- ./api_reference/tools/tavily_research.md
@@ -2308,6 +2307,7 @@ plugins:
23082307
- ../llama-index-integrations/llms/llama-index-llms-nebius
23092308
- ../llama-index-integrations/postprocessor/llama-index-postprocessor-bedrock-rerank
23102309
- ../llama-index-integrations/postprocessor/llama-index-postprocessor-pinecone-native-rerank
2310+
- ../llama-index-integrations/tools/llama-index-tools-scrapegraph
23112311
- redirects:
23122312
redirect_maps:
23132313
./api/llama_index.vector_stores.MongoDBAtlasVectorSearch.html: api_reference/storage/vector_store/mongodb.md

llama-index-core/llama_index/core/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
"""Init file of LlamaIndex."""
22

3-
__version__ = "0.12.5"
3+
__version__ = "0.12.6"
44

55
import logging
66
from logging import NullHandler

llama-index-core/pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ name = "llama-index-core"
4646
packages = [{include = "llama_index"}]
4747
readme = "README.md"
4848
repository = "https://github.com/run-llama/llama_index"
49-
version = "0.12.5"
49+
version = "0.12.6"
5050

5151
[tool.poetry.dependencies]
5252
SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}

llama-index-integrations/vector_stores/llama-index-vector-stores-nile/pyproject.toml

+2-2
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,11 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-vector-stores-nile"
2929
readme = "README.md"
30-
version = "0.2.0"
30+
version = "0.2.1"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<4.0"
34-
llama-index-core = "^0.12.0"
34+
llama-index-core = "^0.12.6"
3535
psycopg = "^3.2"
3636

3737
[tool.poetry.group.dev.dependencies]

llama-index-integrations/vector_stores/llama-index-vector-stores-postgres/pyproject.toml

+2-2
Original file line numberDiff line numberDiff line change
@@ -27,14 +27,14 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-vector-stores-postgres"
2929
readme = "README.md"
30-
version = "0.4.0"
30+
version = "0.4.1"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<4.0"
3434
pgvector = ">=0.3.6,<1.0.0"
3535
psycopg2-binary = ">=2.9.9,<3.0.0"
3636
asyncpg = ">=0.29.0,<1.0.0"
37-
llama-index-core = "^0.12.0"
37+
llama-index-core = "^0.12.6"
3838

3939
[tool.poetry.dependencies.sqlalchemy]
4040
extras = ["asyncio"]

llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/pyproject.toml

+2-2
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
2727
license = "MIT"
2828
name = "llama-index-vector-stores-qdrant"
2929
readme = "README.md"
30-
version = "0.4.0"
30+
version = "0.4.1"
3131

3232
[tool.poetry.dependencies]
3333
python = ">=3.9,<3.13"
3434
qdrant-client = ">=1.7.1"
3535
grpcio = "^1.60.0"
36-
llama-index-core = "^0.12.0"
36+
llama-index-core = "^0.12.6"
3737

3838
[tool.poetry.extras]
3939
fastembed = ["fastembed"]

0 commit comments

Comments
 (0)