diff --git a/supporting-blog-content/llamaindex-re-ranker-and-elasticsearch-re-ranker/llamaindex_rerank_notebook.ipynb b/supporting-blog-content/llamaindex-re-ranker-and-elasticsearch-re-ranker/llamaindex_rerank_notebook.ipynb
new file mode 100644
index 00000000..a3701195
--- /dev/null
+++ b/supporting-blog-content/llamaindex-re-ranker-and-elasticsearch-re-ranker/llamaindex_rerank_notebook.ipynb
@@ -0,0 +1,817 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "LlrEjmtJNpuX"
+   },
+   "source": [
+    "# LlamaIndex re-ranker and Elasticsearch re-ranker: Comparison review\n",
+    "\n",
+    "This notebook is based on the article [LlamaIndex re-ranker and Elasticsearch re-ranker: Comparison review](https://www.elastic.co/search-labs/blog/llamaIndex-reranker-and-elasticsearch-reranker-comparison-review). Its main goal is to compare the performance of the LlamaIndex re-ranker and the Elasticsearch re-ranker."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "collapsed": true,
+    "id": "D1SqWMbbASRS",
+    "outputId": "8ab3bb5d-5d3c-4244-f14c-c303c7ab4f6f"
+   },
+   "outputs": [],
+   "source": [
+    "%pip install llama-index-core llama-index-llms-openai rank-llm llama-index-postprocessor-rankgpt-rerank llama-index-vector-stores-elasticsearch elasticsearch -q"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {
+    "id": "rAesontNXpLu"
+   },
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import nest_asyncio\n",
+    "from getpass import getpass\n",
+    "\n",
+    "from llama_index.vector_stores.elasticsearch import ElasticsearchStore\n",
+    "from llama_index.core import (\n",
+    "    Document,\n",
+    "    VectorStoreIndex,\n",
+    "    QueryBundle,\n",
+    "    Settings,\n",
+    "    StorageContext,\n",
+    ")\n",
+    "from llama_index.core.retrievers import VectorIndexRetriever\n",
+    "from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank\n",
+    "from llama_index.llms.openai import OpenAI\n",
+    "\n",
+    "from elasticsearch import Elasticsearch\n",
+    "\n",
+    "nest_asyncio.apply()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Set up keys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ[\"ELASTICSEARCH_ENDPOINT\"] = getpass(\"Elastic Endpoint: \")\n",
+    "os.environ[\"ELASTICSEARCH_API_KEY\"] = getpass(\"Elastic API Key: \")\n",
+    "os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI Key: \")\n",
+    "\n",
+    "\n",
+    "INDEX_NAME = \"products-laptops\"\n",
+    "INFERENCE_RERANK_NAME = \"my-elastic-rerank\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "3O2HclcYHEsS"
+   },
+   "source": [
+    "### Elasticsearch client"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {
+    "id": "EHYHU2-1nUeW"
+   },
+   "outputs": [],
+   "source": [
+    "_client = Elasticsearch(\n",
+    "    os.environ[\"ELASTICSEARCH_ENDPOINT\"],\n",
+    "    api_key=os.environ[\"ELASTICSEARCH_API_KEY\"],\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Mappings\n",
+    "\n",
+    "These mappings define the index in Elasticsearch. The `metadata` field is an object field that contains the product information. The `semantic_field` field is of type `semantic_text` and stores the semantic representation of the product. The `embe
ddings` field is a dense vector field that stores the product embeddings that LlamaIndex uses for retrieval and re-ranking."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "try:\n",
+    "    _client.indices.create(\n",
+    "        index=INDEX_NAME,\n",
+    "        body={\n",
+    "            \"mappings\": {\n",
+    "                \"properties\": {\n",
+    "                    \"metadata\": {\n",
+    "                        \"properties\": {\n",
+    "                            \"name\": {\"type\": \"text\", \"copy_to\": \"semantic_field\"},\n",
+    "                            \"description\": {\n",
+    "                                \"type\": \"text\",\n",
+    "                                \"copy_to\": \"semantic_field\",\n",
+    "                            },\n",
+    "                            \"price\": {\n",
+    "                                \"type\": \"float\",\n",
+    "                            },\n",
+    "                            \"reviews\": {\n",
+    "                                \"type\": \"float\",\n",
+    "                            },\n",
+    "                            \"sales\": {\"type\": \"integer\"},\n",
+    "                            \"features\": {\n",
+    "                                \"type\": \"keyword\",\n",
+    "                                \"copy_to\": \"semantic_field\",\n",
+    "                            },\n",
+    "                        }\n",
+    "                    },\n",
+    "                    \"semantic_field\": {\"type\": \"semantic_text\"},\n",
+    "                    \"text\": {\n",
+    "                        \"type\": \"text\"\n",
+    "                    },  # Field to store the text content for LlamaIndex\n",
+    "                    \"embeddings\": {\"type\": \"dense_vector\", \"dims\": 512},\n",
+    "                }\n",
+    "            }\n",
+    "        },\n",
+    "    )\n",
+    "\n",
+    "    print(\"index created successfully\")\n",
+    "except Exception as e:\n",
+    "    print(\n",
+    "        f\"Error creating index: {e.info['error']['root_cause'][0]['reason'] }\"\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {
+    "id": "LVr6TR8qlw2M"
+   },
+   "outputs": [],
+   "source": [
+    "products = [\n",
+    "    {\n",
+    "        \"name\": \"ASUS ROG Strix G16\",\n",
+    "        \"description\": \"Powerful gaming laptop with Intel Core i9 and RTX 4070.\",\n",
+    "        \"price\": 1899.99,\n",
+    "        \"reviews\": 4.7,\n",
+    "        \"sales\": 320,\n",
+    "        \"features\": [\n",
+    "            \"Intel Core i9\",\n",
+    "            \"RTX 4070\",\n",
+    "            \"16GB RAM\",\n",
+    "            \"512GB SSD\",\n",
+    "            \"165Hz Display\",\n",
+    "        ],\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"Razer Blade 15\",\n",
+    "        \"description\": \"Premium gaming laptop with an ultra-slim design and high refresh rate.\",\n",
+    "        \"price\": 2499.99,\n",
+    "        \"reviews\": 4.6,\n",
+    "        \"sales\": 290,\n",
+    "        \"features\": [\n",
+    "            \"Intel Core i7\",\n",
+    "            \"RTX 4060\",\n",
+    "            \"16GB RAM\",\n",
+    "            \"1TB SSD\",\n",
+    "            \"240Hz Display\",\n",
+    "        ],\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"Acer Predator Helios 300\",\n",
+    "        \"description\": \"Affordable yet powerful gaming laptop with RTX graphics.\",\n",
+    "        \"price\": 1399.99,\n",
+    "        \"reviews\": 4.5,\n",
+    "        \"sales\": 500,\n",
+    "        \"features\": [\n",
+    "            \"Intel Core i7\",\n",
+    "            \"RTX 3060\",\n",
+    "            \"16GB RAM\",\n",
+    "            \"512GB SSD\",\n",
+    "            \"144Hz Display\",\n",
+    "        ],\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"MSI Stealth 17\",\n",
+    "        \"description\": \"High-performance gaming laptop with a 17-inch display.\",\n",
+    "        \"price\": 2799.99,\n",
+    "        \"reviews\": 4.8,\n",
+    "        \"sales\": 200,\n",
+    "        \"features\": [\"Intel Core i9\", \"RTX 4080\", \"32GB RAM\", \"1TB SSD\", \"4K Display\"],\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"Dell XPS 15\",\n",
+    "        \"description\": \"Sleek and powerful ultrabook with a high-resolution display.\",\n",
+    "        \"price\": 2199.99,\n",
+    "        \"reviews\": 4.7,\n",
+    "        \"sales\": 350,\n",
+    "        \"features\": [\n",
+    "            \"Intel Core i7\",\n",
+    "            \"RTX 3050 Ti\",\n",
+    "            \"16GB RAM\",\n",
+    "            \"1TB SSD\",\n",
+    "            \"OLED Display\",\n",
+    "        ],\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"HP Omen 16\",\n",
+    "        \"description\": \"Gaming laptop with a balanced price-to-performance ratio.\",\n",
+
" \"price\": 1599.99,\n", + " \"reviews\": 4.4,\n", + " \"sales\": 280,\n", + " \"features\": [\n", + " \"AMD Ryzen 7\",\n", + " \"RTX 3060\",\n", + " \"16GB RAM\",\n", + " \"512GB SSD\",\n", + " \"165Hz Display\",\n", + " ],\n", + " },\n", + " {\n", + " \"name\": \"Lenovo Legion 5 Pro\",\n", + " \"description\": \"Powerful Ryzen-powered gaming laptop with high refresh rate.\",\n", + " \"price\": 1799.99,\n", + " \"reviews\": 4.6,\n", + " \"sales\": 400,\n", + " \"features\": [\n", + " \"AMD Ryzen 9\",\n", + " \"RTX 3070 Ti\",\n", + " \"16GB RAM\",\n", + " \"1TB SSD\",\n", + " \"165Hz Display\",\n", + " ],\n", + " },\n", + " {\n", + " \"name\": \"MacBook Pro 16\",\n", + " \"description\": \"Apple's most powerful laptop with M3 Max chip.\",\n", + " \"price\": 3499.99,\n", + " \"reviews\": 4.9,\n", + " \"sales\": 500,\n", + " \"features\": [\n", + " \"Apple M3 Max\",\n", + " \"32GB RAM\",\n", + " \"1TB SSD\",\n", + " \"Liquid Retina XDR Display\",\n", + " ],\n", + " },\n", + " {\n", + " \"name\": \"Alienware m18\",\n", + " \"description\": \"High-end gaming laptop with extreme performance.\",\n", + " \"price\": 2999.99,\n", + " \"reviews\": 4.8,\n", + " \"sales\": 150,\n", + " \"features\": [\n", + " \"Intel Core i9\",\n", + " \"RTX 4090\",\n", + " \"32GB RAM\",\n", + " \"2TB SSD\",\n", + " \"480Hz Display\",\n", + " ],\n", + " },\n", + " {\n", + " \"name\": \"Samsung Galaxy Book3 Ultra\",\n", + " \"description\": \"Ultra-lightweight yet powerful laptop with AMOLED display.\",\n", + " \"price\": 2099.99,\n", + " \"reviews\": 4.5,\n", + " \"sales\": 180,\n", + " \"features\": [\n", + " \"Intel Core i7\",\n", + " \"RTX 4070\",\n", + " \"16GB RAM\",\n", + " \"512GB SSD\",\n", + " \"AMOLED Display\",\n", + " ],\n", + " },\n", + " {\n", + " \"name\": \"Microsoft Surface Laptop 5\",\n", + " \"description\": \"Sleek productivity laptop with great battery life.\",\n", + " \"price\": 1699.99,\n", + " \"reviews\": 4.3,\n", + " \"sales\": 220,\n", + " \"features\": [\"Intel Core i7\", \"16GB RAM\", \"512GB SSD\", \"Touchscreen\"],\n", + " },\n", + " {\n", + " \"name\": \"Gigabyte AORUS 17\",\n", + " \"description\": \"Performance-focused gaming laptop with powerful cooling.\",\n", + " \"price\": 1999.99,\n", + " \"reviews\": 4.6,\n", + " \"sales\": 250,\n", + " \"features\": [\n", + " \"Intel Core i9\",\n", + " \"RTX 4070\",\n", + " \"16GB RAM\",\n", + " \"1TB SSD\",\n", + " \"360Hz Display\",\n", + " ],\n", + " },\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## User question" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "user_query = \"Best laptops for gaming\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## LlamaIndex Reranking\n", + "\n", + "In this section we will use the LlamaIndex re-ranker to re-rank the results of the search query." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Indexing data to Elasticsearch\n", + "\n", + "The documents are indexed to Elasticsearch using the `ElasticsearchStore` class. This is a wrapper around the Elasticsearch client and inserts the documents into the Elasticsearch index in a `metadata` field." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i2fZZLDgxtPu" + }, + "outputs": [], + "source": [ + "document_objects = []\n", + "\n", + "es_store = ElasticsearchStore(\n", + " es_url=os.environ[\"ELASTICSEARCH_ENDPOINT\"],\n", + " es_api_key=os.environ[\"ELASTICSEARCH_API_KEY\"],\n", + " index_name=INDEX_NAME,\n", + " embedding_field=\"embeddings\",\n", + " text_field=\"text\",\n", + ")\n", + "\n", + "storage_context = StorageContext.from_defaults(vector_store=es_store)\n", + "\n", + "for doc in products:\n", + " text_content = f\"\"\"\n", + " Product Name: {doc[\"name\"]}\n", + " Description: {doc[\"description\"]}\n", + " Price: ${doc[\"price\"]}\n", + " Reviews: {doc[\"reviews\"]} stars\n", + " Sales: {doc[\"sales\"]} units sold\n", + " Features: {', '.join(doc[\"features\"])}\n", + " \"\"\"\n", + "\n", + " metadata = {\n", + " \"name\": doc[\"name\"],\n", + " \"description\": doc[\"description\"],\n", + " \"price\": doc[\"price\"],\n", + " \"reviews\": doc[\"reviews\"],\n", + " \"sales\": doc[\"sales\"],\n", + " \"features\": doc[\"features\"],\n", + " }\n", + "\n", + " document_objects.append(Document(text=text_content, metadata=metadata))\n", + "\n", + "index = VectorStoreIndex([], storage_context=storage_context)\n", + "\n", + "for doc in document_objects:\n", + " index.insert(doc)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LLM setup" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "Settings.llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\")\n", + "Settings.chunk_size = 512" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LlamaIndex rerank feature" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "id": "cAYwonikXP8b" + }, + "outputs": [], + "source": [ + "# Re-rank method\n", + "def get_retrieved_nodes(\n", + " query_str, vector_top_k=10, reranker_top_n=5, with_reranker=False\n", + "):\n", + " query_bundle = QueryBundle(query_str)\n", + " # configure retriever\n", + " retriever = VectorIndexRetriever(\n", + " index=index,\n", + " similarity_top_k=vector_top_k,\n", + " )\n", + " retrieved_nodes = retriever.retrieve(query_bundle)\n", + "\n", + " if with_reranker:\n", + " # configure reranker\n", + " reranker = RankGPTRerank(\n", + " llm=OpenAI(\n", + " model=\"gpt-4.1-mini\",\n", + " temperature=0.0,\n", + " api_key=os.environ[\"OPENAI_API_KEY\"],\n", + " ),\n", + " top_n=reranker_top_n,\n", + " verbose=True,\n", + " )\n", + " retrieved_nodes = reranker.postprocess_nodes(retrieved_nodes, query_bundle)\n", + "\n", + " return retrieved_nodes" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to format the resulting documents.\n", + "def visualize_retrieved_nodes(nodes):\n", + " formatted_results = []\n", + "\n", + " for node in nodes:\n", + " text = node.node.get_text()\n", + "\n", + " product_name = text.split(\"Product Name:\")[1].split(\"\\n\")[0].strip()\n", + " price = text.split(\"Price:\")[1].split(\"\\n\")[0].strip()\n", + " reviews = text.split(\"Reviews:\")[1].split(\"\\n\")[0].strip()\n", + " features = text.split(\"Features:\")[1].strip()\n", + "\n", + " formatted_result = f\"{price} - {product_name} ({reviews}) {features}\"\n", + " formatted_results.append(formatted_result)\n", + "\n", + " return formatted_results" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PMFhgK9CXTze" + 
}, + "source": [ + "### Without rerank" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 352 + }, + "id": "MNcpUJsKXZPK", + "outputId": "58b4a8b9-a10f-4eae-f727-3268e953b806" + }, + "outputs": [], + "source": [ + "new_nodes = get_retrieved_nodes(\n", + " query_str=user_query,\n", + " vector_top_k=5,\n", + " with_reranker=False,\n", + ")\n", + "\n", + "results = visualize_retrieved_nodes(new_nodes)\n", + "\n", + "print(\"\\nTop 5 results without rerank:\")\n", + "for idx, result in enumerate(results, start=1):\n", + " print(f\"{idx}. {result}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### With rerank" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 369 + }, + "id": "JXTnX6rrIa-D", + "outputId": "c12b1e40-07b2-47b2-a3c9-fcbc1e81a47f" + }, + "outputs": [], + "source": [ + "new_nodes = get_retrieved_nodes(\n", + " user_query,\n", + " vector_top_k=5,\n", + " reranker_top_n=5,\n", + " with_reranker=True,\n", + ")\n", + "\n", + "results = visualize_retrieved_nodes(new_nodes)\n", + "\n", + "print(\"\\nTop 5 results with reranking:\")\n", + "for idx, result in enumerate(results, start=1):\n", + " print(f\"{idx}. {result}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Elasticsearch Reranking\n", + "\n", + "In this section we will use the Elasticsearch re-ranker to re-rank the results of the search query." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Creating inference rerank endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " _client.options(\n", + " request_timeout=60, max_retries=3, retry_on_timeout=True\n", + " ).inference.put(\n", + " task_type=\"rerank\",\n", + " inference_id=INFERENCE_RERANK_NAME,\n", + " body={\n", + " \"service\": \"elasticsearch\",\n", + " \"service_settings\": {\n", + " \"model_id\": \".rerank-v1\",\n", + " \"num_threads\": 1,\n", + " \"adaptive_allocations\": {\n", + " \"enabled\": True,\n", + " \"min_number_of_allocations\": 1,\n", + " \"max_number_of_allocations\": 4,\n", + " },\n", + " },\n", + " },\n", + " )\n", + "\n", + " print(\"Inference endpoint created successfully.\")\n", + "except Exception as e:\n", + " print(\n", + " f\"Error creating inference endpoint: {e.info['error']['root_cause'][0]['reason'] }\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Function to execute search queries" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "id": "X0xbyG_fnoBU" + }, + "outputs": [], + "source": [ + "# Function to execute search queries\n", + "async def es_search(query):\n", + " response = _client.search(index=INDEX_NAME, body=query)\n", + " hits = response[\"hits\"][\"hits\"]\n", + "\n", + " if not hits:\n", + " return \"\"\n", + "\n", + " return hits" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to format the results\n", + "def format_es_results(hits):\n", + " formatted_results = []\n", + "\n", + " for hit in hits:\n", + " metadata = hit[\"_source\"][\"metadata\"]\n", + "\n", + " name = metadata.get(\"name\")\n", + " price = metadata.get(\"price\")\n", + " reviews = metadata.get(\"reviews\")\n", + " features = metadata.get(\"features\")\n", 
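+    "        # Output format mirrors visualize_retrieved_nodes() above, so the Elasticsearch and LlamaIndex results can be compared side by side\n",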
+ "\n", + " formatted_result = f\"{price} - {name} ({reviews}) {features}\"\n", + " formatted_results.append(formatted_result)\n", + "\n", + " return formatted_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Semantic query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "semantic_results = await es_search(\n", + " {\n", + " \"size\": 5,\n", + " \"query\": {\n", + " \"semantic\": {\n", + " \"field\": \"semantic_field\",\n", + " \"query\": user_query,\n", + " }\n", + " },\n", + " \"_source\": {\n", + " \"includes\": [\n", + " \"metadata\",\n", + " ]\n", + " },\n", + " }\n", + ")\n", + "\n", + "semantic_formatted_results = format_es_results(semantic_results)\n", + "\n", + "print(\"Query results:\")\n", + "for idx, result in enumerate(semantic_formatted_results, start=1):\n", + " print(f\"{idx}. {result}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ME8w7MR4orci", + "outputId": "c8495176-ef78-4fb2-8cd8-6ca6c25b0811" + }, + "outputs": [], + "source": [ + "rerank_results = await es_search(\n", + " {\n", + " \"size\": 5,\n", + " \"_source\": {\n", + " \"includes\": [\n", + " \"metadata\",\n", + " ]\n", + " },\n", + " \"retriever\": {\n", + " \"text_similarity_reranker\": {\n", + " \"retriever\": {\n", + " \"standard\": {\n", + " \"query\": {\n", + " \"semantic\": {\n", + " \"field\": \"semantic_field\",\n", + " \"query\": user_query,\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"field\": \"semantic_field\",\n", + " \"inference_id\": INFERENCE_RERANK_NAME,\n", + " \"inference_text\": \"reorder by quality-price ratio\",\n", + " \"rank_window_size\": 5,\n", + " }\n", + " },\n", + " }\n", + ")\n", + "\n", + "rerank_formatted_results = format_es_results(rerank_results)\n", + "\n", + "print(\"Query results:\")\n", + "for idx, result in enumerate(rerank_formatted_results, start=1):\n", + " print(f\"{idx}. {result}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "S6WZMJayyzxh" + }, + "source": [ + "## Cleaning environment\n", + "\n", + "Delete the resources used to prevent them from consuming resources." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_results(results):\n", + " if results.get(\"acknowledged\", False):\n", + " print(\"DELETED successfully.\")\n", + "\n", + " if \"error\" in results:\n", + " print(f\"ERROR: {results['error']['root_cause'][0]['reason']}\")\n", + "\n", + "\n", + "# Cleanup - Delete Index\n", + "result = _client.indices.delete(index=INDEX_NAME, ignore=[400, 404])\n", + "print_results(result)\n", + "\n", + "\n", + "# Cleanup - Inference endpoint\n", + "result = _client.inference.delete(inference_id=INFERENCE_RERANK_NAME, ignore=[400, 404])\n", + "print_results(result)" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [ + "9lvPHaXjPlfu" + ], + "provenance": [] + }, + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +}