3
3
import httpx
4
4
5
5
from llama_cloud .client import AsyncLlamaCloud
6
- from llama_cloud_services import LlamaParse
6
+ from llama_cloud_services import LlamaCloudIndex , LlamaParse
7
+ from llama_cloud_services .parse import ResultType
7
8
8
9
# deployed agents may infer their name from the deployment name
9
10
# Note: Make sure that an agent deployment with this name actually exists
18
19
INDEX_NAME = "document_qa_index"
19
20
20
21
21
- def get_custom_client () -> httpx .AsyncClient :
22
+ @functools .cache
23
+ def get_base_cloud_client () -> httpx .AsyncClient :
22
24
return httpx .AsyncClient (
23
25
timeout = 60 ,
24
26
headers = {"Project-Id" : LLAMA_CLOUD_PROJECT_ID }
@@ -32,7 +34,7 @@ def get_llama_cloud_client() -> AsyncLlamaCloud:
32
34
return AsyncLlamaCloud (
33
35
base_url = LLAMA_CLOUD_BASE_URL ,
34
36
token = LLAMA_CLOUD_API_KEY ,
35
- httpx_client = get_custom_client (),
37
+ httpx_client = get_base_cloud_client (),
36
38
)
37
39
38
40
@@ -45,8 +47,20 @@ def get_llama_parse_client() -> LlamaParse:
45
47
adaptive_long_table = True ,
46
48
outlined_table_extraction = True ,
47
49
output_tables_as_HTML = True ,
48
- result_type = "markdown" ,
50
+ result_type = ResultType . MD ,
49
51
api_key = LLAMA_CLOUD_API_KEY ,
50
52
project_id = LLAMA_CLOUD_PROJECT_ID ,
51
- custom_client = get_custom_client (),
53
+ custom_client = get_base_cloud_client (),
54
+ )
55
+
56
+
57
@functools.cache
def get_index(index_name: str) -> LlamaCloudIndex:
    """Return a (cached) LlamaCloudIndex handle for *index_name*.

    Creates the index on LlamaCloud if it does not already exist
    (``create_index`` is idempotent by name).  Results are memoized per
    index name so repeated lookups reuse the same client object — using
    ``functools.cache`` for consistency with ``get_base_cloud_client``.

    Args:
        index_name: Name of the LlamaCloud index (e.g. ``INDEX_NAME``).

    Returns:
        A ``LlamaCloudIndex`` bound to the configured project, sharing the
        module-wide httpx client (which injects the ``Project-Id`` header).
    """
    return LlamaCloudIndex.create_index(
        name=index_name,
        project_id=LLAMA_CLOUD_PROJECT_ID,
        api_key=LLAMA_CLOUD_API_KEY,
        base_url=LLAMA_CLOUD_BASE_URL,
        show_progress=True,
        custom_client=get_base_cloud_client(),
    )
0 commit comments