diff --git a/conda/meta.yaml b/conda/meta.yaml
index 1d0031e..49061e0 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -18,7 +18,7 @@ requirements:
     - python
     - setuptools
   run:
-    - numpy
+    - numpy==1.24.0
     - pandas
     - xarray
     - scipy
diff --git a/dev/generate datapackages.ipynb b/dev/generate datapackages.ipynb
index 7fdab39..9b7498a 100644
--- a/dev/generate datapackages.ipynb	
+++ b/dev/generate datapackages.ipynb	
@@ -236,174 +236,63 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:08:59\n"
+      "0% [#                         ] 100% | ETA: 00:00:01"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "------ Calculating LCA results for 2010...\n"
+      "[{35812: <xarray.DataArray 'value' ()> Size: 8B\n",
+      "array(6.18299776e+11)\n",
+      "Coordinates:\n",
+      "    pathway    <U10 40B 'SSP2-RCP19'\n",
+      "    region     <U3 12B 'BRA'\n",
+      "    year       int64 8B 2005\n",
+      "    model      <U5 20B 'image'\n",
+      "    variables  <U49 196B 'Industry_Food and Tobacco_Solid biomass'}, {35812: <xarray.DataArray 'value' ()> Size: 8B\n",
+      "array(6.18299776e+11)\n",
+      "Coordinates:\n",
+      "    pathway    <U10 40B 'SSP2-RCP19'\n",
+      "    region     <U3 12B 'BRA'\n",
+      "    year       int64 8B 2005\n",
+      "    model      <U5 20B 'image'\n",
+      "    variables  <U49 196B 'Industry_Food processing_Solid biomass'}, {36217: <xarray.DataArray 'value' ()> Size: 8B\n",
+      "array(5.26656812e+11)\n",
+      "Coordinates:\n",
+      "    pathway    <U10 40B 'SSP2-RCP19'\n",
+      "    region     <U3 12B 'BRA'\n",
+      "    year       int64 8B 2005\n",
+      "    model      <U5 20B 'image'\n",
+      "    variables  <U49 196B 'Transport_Freight (heavy truck)_Liquid fossil'}, {36190: <xarray.DataArray 'value' ()> Size: 8B\n",
+      "array(1.15323356e+12)\n",
+      "Coordinates:\n",
+      "    pathway    <U10 40B 'SSP2-RCP19'\n",
+      "    region     <U3 12B 'BRA'\n",
+      "    year       int64 8B 2005\n",
+      "    model      <U5 20B 'image'\n",
+      "    variables  <U49 196B 'Transport_Passenger_Liquid'}]\n"
      ]
     },
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:09:20\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2015...\n",
-      "LCA matrices not found for the given model, scenario, and year.\n",
-      "------ Calculating LCA results for 2020...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:09:14\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2025...\n",
-      "LCA matrices not found for the given model, scenario, and year.\n",
-      "------ Calculating LCA results for 2030...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:06:54\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2035...\n",
-      "LCA matrices not found for the given model, scenario, and year.\n",
-      "------ Calculating LCA results for 2040...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:04:35\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2045...\n",
-      "LCA matrices not found for the given model, scenario, and year.\n",
-      "------ Calculating LCA results for 2050...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:04:49\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2060...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:06:30\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2070...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:08:32\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2080...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:08:39\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2090...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:09:35\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "------ Calculating LCA results for 2100...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "0% [##########################] 100% | ETA: 00:00:00\n",
-      "Total time elapsed: 00:10:13\n"
+     "ename": "TypeError",
+     "evalue": "MultiLCA.__init__() got an unexpected keyword argument 'demands'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[2], line 2\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mnumpy\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mnp\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m dp \u001b[38;5;241m=\u001b[39m \u001b[43mp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcalculate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m      3\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmethods\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\n\u001b[1;32m      4\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;66;43;03m#'EF v3.1 - climate change - global warming potential (GWP100)',\u001b[39;49;00m\n\u001b[1;32m      5\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mRELICS - metals extraction - Lithium\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m      6\u001b[0m \u001b[43m    \u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m      7\u001b[0m \u001b[43m    \u001b[49m\u001b[43mregions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43mr\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mr\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscenarios\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcoords\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mregion\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalues\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mr\u001b[49m\u001b[38;5;241;43m!=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWorld\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m      8\u001b[0m 
\u001b[43m    \u001b[49m\u001b[38;5;66;43;03m#regions=[\"WEU\",],\u001b[39;49;00m\n\u001b[1;32m      9\u001b[0m \u001b[43m    \u001b[49m\u001b[43mscenarios\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSSP2-RCP19\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     10\u001b[0m \u001b[43m    \u001b[49m\u001b[43mvariables\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\n\u001b[1;32m     11\u001b[0m \u001b[43m        \u001b[49m\u001b[43mv\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscenarios\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcoords\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mvariables\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalues\u001b[49m\n\u001b[1;32m     12\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43many\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mi\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mIndustry\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mTransport\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mHeating\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     13\u001b[0m \u001b[43m    \u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     14\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m#years=[2005, 2020, 2050, 2070, 2100],\u001b[39;49;00m\n\u001b[1;32m     15\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcharacterization\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m     16\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m#flows=[\"Lithium - natural resource - in ground - kilogram\",],\u001b[39;49;00m\n\u001b[1;32m     17\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m#data_type=np.float32,\u001b[39;49;00m\n\u001b[1;32m     18\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmultiprocessing\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m     19\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdemand_cutoff\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.1\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m     20\u001b[0m \u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/GitHub/pathways/pathways/pathways.py:742\u001b[0m, in \u001b[0;36mPathways.calculate\u001b[0;34m(self, methods, models, scenarios, regions, years, variables, characterization, flows, multiprocessing, data_type, demand_cutoff)\u001b[0m\n\u001b[1;32m    739\u001b[0m         bar\u001b[38;5;241m.\u001b[39mupdate()\n\u001b[1;32m    740\u001b[0m         \u001b[38;5;66;03m# Iterate over each region\u001b[39;00m\n\u001b[1;32m    741\u001b[0m         results\u001b[38;5;241m.\u001b[39mappend(\n\u001b[0;32m--> 742\u001b[0m             \u001b[43mprocess_region\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    743\u001b[0m \u001b[43m                \u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    744\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    745\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mscenario\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    746\u001b[0m \u001b[43m                    \u001b[49m\u001b[43myear\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    747\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mregion\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    748\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mvariables\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    749\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mvars_info\u001b[49m\u001b[43m[\u001b[49m\u001b[43mregion\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    750\u001b[0m \u001b[43m                    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscenarios\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    751\u001b[0m \u001b[43m                    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    752\u001b[0m \u001b[43m                    
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43munits\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    753\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mdp\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    754\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mA_index\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    755\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mB_index\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    756\u001b[0m \u001b[43m                    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreverse_classifications\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    757\u001b[0m \u001b[43m                    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlca_results\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcoords\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    758\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mflows\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    759\u001b[0m \u001b[43m                    \u001b[49m\u001b[43mdemand_cutoff\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    760\u001b[0m \u001b[43m                \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    761\u001b[0m \u001b[43m            \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    762\u001b[0m         )\n\u001b[1;32m    764\u001b[0m \u001b[38;5;66;03m# Store results in the LCA results xarray\u001b[39;00m\n\u001b[1;32m    765\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m result \u001b[38;5;129;01min\u001b[39;00m results:\n",
+      "File \u001b[0;32m~/GitHub/pathways/pathways/pathways.py:303\u001b[0m, in \u001b[0;36mprocess_region\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m    295\u001b[0m     FU\u001b[38;5;241m.\u001b[39mappend(\n\u001b[1;32m    296\u001b[0m         {\n\u001b[1;32m    297\u001b[0m             idx: demand \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mfloat\u001b[39m(unit_vector),\n\u001b[1;32m    298\u001b[0m         }\n\u001b[1;32m    299\u001b[0m     )\n\u001b[1;32m    301\u001b[0m \u001b[38;5;28mprint\u001b[39m(FU)\n\u001b[0;32m--> 303\u001b[0m lca \u001b[38;5;241m=\u001b[39m \u001b[43mbc\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mMultiLCA\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    304\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdemands\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mFU\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    305\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmethod_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdp\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    306\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdata_objs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43mdp\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    307\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    308\u001b[0m lca\u001b[38;5;241m.\u001b[39mlci()\n\u001b[1;32m    309\u001b[0m lca\u001b[38;5;241m.\u001b[39mlcia()\n",
+      "\u001b[0;31mTypeError\u001b[0m: MultiLCA.__init__() got an unexpected keyword argument 'demands'"
      ]
     }
    ],
    "source": [
     "import numpy as np\n",
-    "p.calculate(\n",
+    "dp = p.calculate(\n",
     "    methods=[\n",
-    "        'EF v3.1 - climate change - global warming potential (GWP100)',\n",
+    "        #'EF v3.1 - climate change - global warming potential (GWP100)',\n",
     "        'RELICS - metals extraction - Lithium',\n",
     "    ],\n",
     "    regions=[r for r in p.scenarios.coords[\"region\"].values if r!=\"World\"],\n",
@@ -422,6 +311,36 @@
     ")"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "dedf1bb9-47a2-497c-b788-7fa64986827a",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "\u001b[0;31mSignature:\u001b[0m \u001b[0mdp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdehydrated_interfaces\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mList\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+       "\u001b[0;31mSource:\u001b[0m   \n",
+       "    \u001b[0;32mdef\u001b[0m \u001b[0mdehydrated_interfaces\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mList\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m        \u001b[0;34m\"\"\"Return a list of the resource groups which have dehydrated interfaces\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m        \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m            \u001b[0mobj\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"group\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m            \u001b[0;32mfor\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresources\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m            \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mUndefinedInterface\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\n",
+       "\u001b[0;34m\u001b[0m        \u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+       "\u001b[0;31mFile:\u001b[0m      /opt/homebrew/Caskroom/miniforge/base/envs/pathways/lib/python3.11/site-packages/bw_processing/datapackage.py\n",
+       "\u001b[0;31mType:\u001b[0m      method"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "dp.dehydrated_interfaces??"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/pathways/lca.py b/pathways/lca.py
index 833f972..51c5326 100644
--- a/pathways/lca.py
+++ b/pathways/lca.py
@@ -1,15 +1,16 @@
 import csv
-import sys
 import warnings
 from pathlib import Path
 from typing import Dict, List, Optional, Tuple
 
-import numpy as np
-import scipy
 import scipy.sparse
 import xarray as xr
 from scipy import sparse
 from scipy.sparse import csr_matrix
+import bw_processing as bwp
+import numpy as np
+
+from .lcia import get_lcia_methods
 
 # Attempt to import pypardiso's spsolve function.
 # If it isn't available, fall back on scipy's spsolve.
@@ -84,49 +85,31 @@ def read_indices_csv(file_path: Path) -> Dict[Tuple[str, str, str, str], str]:
 
 def load_matrix_and_index(
     file_path: Path,
-    num_indices: int,
-    sign: int = 1,
-    transpose: bool = False,
-    extra_indices: Optional[int] = None,
-    data_type: np.dtype = np.float32,
-) -> csr_matrix:
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
     """
     Reads a CSV file and returns its contents as a CSR sparse matrix.
 
     :param file_path: The path to the CSV file.
     :type file_path: Path
-    :param num_indices: The number of indices in the data.
-    :type num_indices: int
-    :param transpose: Whether or not to transpose the matrix. Default is False.
-    :type transpose: bool
-    :param extra_indices: Optional parameter to adjust the shape of the matrix. Default is None.
-    :type extra_indices: Optional[int]
-
-    :return: The data from the CSV file as a CSR sparse matrix.
-    :rtype: csr_matrix
+    :return: A tuple containing the data, indices, and sign of the data.
+    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray]
     """
     # Load the data from the CSV file
     coords = np.genfromtxt(file_path, delimiter=";", skip_header=1)
 
-    # Extract the row and column indices and the data
-    I = coords[:, 0].astype(int)
-    J = coords[:, 1].astype(int)
-    data = coords[:, 2] * sign
+    # build a structured (row, col) index array in bw_processing's INDICES_DTYPE
+    indices_array = np.array(
+        list(zip(coords[:, 0].astype(int), coords[:, 1].astype(int))),
+        dtype=bwp.INDICES_DTYPE,
+    )
 
-    # Determine the shape of the matrix
-    if transpose:
-        shape = (num_indices, I.max() + 1)
-        idx = (J, I)
-    else:
-        shape = (
-            extra_indices if extra_indices is not None else I.max() + 1,
-            num_indices,
-        )
-        idx = (I, J)
+    data_array = coords[:, 2]
+    # boolean flip array: True where the value is negative (bw_processing stores magnitudes plus a sign flag)
+    sign_array = np.where(data_array < 0, True, False)
+    # make values of data_array all positive
+    data_array = np.abs(data_array)
 
-    # Create and return the CSR matrix
-    matrix = csr_matrix((data, idx), shape=shape, dtype=data_type)
-    return matrix
+    return data_array, indices_array, sign_array
 
 
 def get_lca_matrices(
@@ -134,8 +117,8 @@ def get_lca_matrices(
     model: str,
     scenario: str,
     year: int,
-    data_type: np.dtype = np.float32,
-) -> Tuple[sparse.csr_matrix, sparse.csr_matrix, Dict, Dict]:
+    methods: list,
+) -> Tuple[bwp.datapackage, Dict, Dict]:
     """
     Retrieve Life Cycle Assessment (LCA) matrices from disk.
 
@@ -154,29 +137,85 @@ def get_lca_matrices(
     A_inds = read_indices_csv(dirpath / "A_matrix_index.csv")
     B_inds = read_indices_csv(dirpath / "B_matrix_index.csv")
 
-    A = load_matrix_and_index(
-        dirpath / "A_matrix.csv", len(A_inds), transpose=True, data_type=data_type
-    )
+    # create brightway datapackage
+    dp = bwp.create_datapackage()
 
-    # if A is not square, raise an error
-    if A.shape[0] != A.shape[1]:
-        raise ValueError(f"A must be a square matrix. Current shape is {A.shape}.")
+    a_data, a_indices, a_sign = load_matrix_and_index(
+        dirpath / "A_matrix.csv",
+    )
 
-    B = load_matrix_and_index(
+    b_data, b_indices, b_sign = load_matrix_and_index(
         dirpath / "B_matrix.csv",
-        len(B_inds),
-        sign=-1,
-        extra_indices=len(A_inds),
-        data_type=data_type,
     )
 
-    if B.shape[0] != A.shape[0]:
-        raise ValueError(
-            f"Incompatible dimensions between A and B. A shape: {A.shape}, B shape: {B.shape}"
-        )
+    dp.add_persistent_vector(
+        matrix='technosphere_matrix',
+        indices_array=a_indices,
+        data_array=a_data,
+        flip_array=a_sign,
+    )
+    dp.add_persistent_vector(
+        matrix='biosphere_matrix',
+        indices_array=b_indices,
+        data_array=b_data,
+        flip_array=b_sign,
+    )
+
+    # c_data, c_indices, c_sign = fill_characterization_factors_matrix(
+    #     B_inds, methods
+    # )
+    #
+    #
+    # # if c_data is multidimensional
+    # if len(c_data.shape) > 1:
+    #     dp.add_persistent_array(
+    #         matrix='characterization_matrix',
+    #         indices_array=c_indices,
+    #         data_array=c_data,
+    #         flip_array=c_sign,
+    #     )
+    # else:
+    #     dp.add_persistent_vector(
+    #         matrix='characterization_matrix',
+    #         indices_array=c_indices,
+    #         data_array=c_data,
+    #         flip_array=c_sign,
+    #     )
+
+    return dp, A_inds, B_inds
+
+
+def fill_characterization_factors_matrix(
+        biosphere_flows: dict,
+        methods
+) -> np.ndarray:
+    """
+    Create a characterization matrix based on the list of biosphere flows
+    given.
+    :param biosphere_flows:
+    :param methods: contains names of the methods to use.
+    :return: 2D array of characterization factors, shape (len(biosphere_flows), len(methods)).
+    """
+
+    lcia_data = get_lcia_methods(methods=methods)
+
+    print(lcia_data)
+
+
+    # create a numpy array filled with zeros
+    # of size equal to biosphere_flows and lcia methods
+
+    cf_matrix = np.zeros((len(biosphere_flows), len(methods)))
 
-    return A, B, A_inds, B_inds
+    # fill the matrix
+    for i, flow in enumerate(biosphere_flows):
+        for j, method in enumerate(methods):
+            try:
+                cf_matrix[i, j] = lcia_data[method][flow[:3]]
+            except KeyError:
+                continue
 
+    return cf_matrix
 
 def remove_double_counting(A: csr_matrix, vars_info: dict) -> csr_matrix:
     """
diff --git a/pathways/lcia.py b/pathways/lcia.py
index aa97a47..70a485d 100644
--- a/pathways/lcia.py
+++ b/pathways/lcia.py
@@ -36,36 +36,15 @@ def format_lcia_method_exchanges(method):
     }
 
 
-def get_lcia_methods():
+def get_lcia_methods(methods: list = None):
     """Get a list of available LCIA methods."""
     with open(LCIA_METHODS, "r") as f:
         data = json.load(f)
 
-    return {" - ".join(x["name"]): format_lcia_method_exchanges(x) for x in data}
-
+    if methods:
+        data = [x for x in data if " - ".join(x["name"]) in methods]
 
-def fill_characterization_factors_matrix(biosphere_flows: list, methods) -> np.ndarray:
-    """
-    Create a characterization matrix base don the list of biosphere flows
-    given.
-    :param biosphere_flows:
-    :param methods: contains names of the methods to use.
-    :return:
-    """
-
-    lcia_data = get_lcia_methods()
-
-    # create a numpy array filled with zeros
-    # of size equal to biosphere_flows and lcia methods
+    return {" - ".join(x["name"]): format_lcia_method_exchanges(x) for x in data}
 
-    cf_matrix = np.zeros((len(biosphere_flows), len(methods)))
 
-    # fill the matrix
-    for i, flow in enumerate(biosphere_flows):
-        for j, method in enumerate(methods):
-            try:
-                cf_matrix[i, j] = lcia_data[method][flow[:3]]
-            except KeyError:
-                continue
 
-    return cf_matrix
diff --git a/pathways/pathways.py b/pathways/pathways.py
index 5e944fc..41c16d5 100644
--- a/pathways/pathways.py
+++ b/pathways/pathways.py
@@ -17,6 +17,7 @@
 import yaml
 from datapackage import DataPackage
 from premise.geomap import Geomap
+import bw2calc as bc
 
 from . import DATA_DIR
 from .data_validation import validate_datapackage
@@ -27,7 +28,7 @@
     remove_double_counting,
     solve_inventory,
 )
-from .lcia import fill_characterization_factors_matrix, get_lcia_method_names
+from .lcia import get_lcia_method_names
 from .utils import (
     _get_activity_indices,
     create_lca_results_array,
@@ -215,11 +216,9 @@ def process_region(data: Tuple) -> Union[None, Dict[str, Any]]:
         scenarios,
         mapping,
         units_map,
-        A,
-        B,
+        dp,
         A_index,
         B_index,
-        lcia_matrix,
         reverse_classifications,
         lca_results_coords,
         flows,
@@ -239,25 +238,27 @@ def process_region(data: Tuple) -> Union[None, Dict[str, Any]]:
 
     act_categories = lca_results_coords["act_category"].values
 
-    if lcia_matrix is not None:
-        impact_categories = lca_results_coords["impact_category"].values
-        target = np.zeros(
-            (
-                len(act_categories),
-                len(list(vars_idx)),
-                len(locations),
-                len(impact_categories),
-            )
-        )
-    else:
-        if flows is not None:
-            target = np.zeros(
-                (len(act_categories), len(list(vars_idx)), len(locations), len(flows))
-            )
-        else:
-            target = np.zeros(
-                (len(act_categories), len(list(vars_idx)), len(locations), len(B_index))
-            )
+    # if lcia_matrix is not None:
+    #     impact_categories = lca_results_coords["impact_category"].values
+    #     target = np.zeros(
+    #         (
+    #             len(act_categories),
+    #             len(list(vars_idx)),
+    #             len(locations),
+    #             len(impact_categories),
+    #         )
+    #     )
+    # else:
+    #     if flows is not None:
+    #         target = np.zeros(
+    #             (len(act_categories), len(list(vars_idx)), len(locations), len(flows))
+    #         )
+    #     else:
+    #         target = np.zeros(
+    #             (len(act_categories), len(list(vars_idx)), len(locations), len(B_index))
+    #         )
+
+    FU = []
 
     for v, variable in enumerate(variables):
         idx, dataset = vars_idx[variable]["idx"], vars_idx[variable]["dataset"]
@@ -291,72 +292,93 @@ def process_region(data: Tuple) -> Union[None, Dict[str, Any]]:
         ) < demand_cutoff:
             continue
 
-        # Create the demand vector
-        f = create_demand_vector([idx], A, demand, unit_vector)
-
-        # Solve the inventory
-        C = solve_inventory(A, B, f)
-
-        if lcia_matrix is not None:
-            if lcia_matrix.ndim != 2 or lcia_matrix.shape[0] != B.shape[1]:
-                raise ValueError("Incompatible dimensions between B and lcia_matrix")
-
-            # Solve the LCA problem to get the LCIA scores
-            D = characterize_inventory(C, lcia_matrix)
-
-            # Sum along the first axis of D to get final result
-            D = D.sum(axis=1)
-
-            # Initialize the new array with zeros for missing data
-            E = np.zeros((len(A_index), len(locations), len(impact_categories)))
-
-            # Populate the result array
-            act_locs = [a[-1] for a in rev_A_index.values()]
-
-            for i, act in enumerate(rev_A_index.values()):
-                if act[-1] in act_locs:
-                    loc_idx = location_to_index[act[-1]]
-                    E[i, loc_idx, :] = D[i, :]
-
-            acts_idx = generate_A_indices(
-                A_index,
-                reverse_classifications,
-                lca_results_coords,
-            )
-
-            # Sum over the first axis of D,
-            # using acts_idx for advanced indexing
-            target[:, v] = E[acts_idx, ...].sum(axis=0)
-
-        else:
-            # else, just sum the results of the inventory
-            acts_idx = generate_A_indices(
-                A_index,
-                reverse_classifications,
-                lca_results_coords,
-            )
-            if flows is not None:
-
-                def transf_flow(f):
-                    return tuple(f.split(" - "))
-
-                flows_idx = [int(B_index[transf_flow(f)]) for f in flows]
-                C = C[:, flows_idx]
-
-                # Initialize the new array with zeros for missing data
-                E = np.zeros((len(A_index), len(locations), len(flows_idx)))
-
-                # Populate the result array
-                act_locs = [a[-1] for a in rev_A_index.values()]
+        FU.append(
+            {
+                idx: demand.values * float(unit_vector),
+            }
+        )
 
-                for i, act in enumerate(rev_A_index.values()):
-                    if act[-1] in act_locs:
-                        loc_idx = location_to_index[act[-1]]
-                        E[i, loc_idx, :] = C[i, :]
+    print(FU)
 
-                target[:, v] = E[acts_idx, ...].sum(axis=0)
-            else:
-                target[:, v] = C[acts_idx, ...].sum(axis=0)
+    lca = bc.MultiLCA(
+        demands=FU,
+        method_config=dp,
+        data_objs=[dp],
+    )
+    lca.lci()
+    lca.lcia()
+    lca.score
+
+
+
+        #
+        #
+        # # Create the demand vector
+        # f = create_demand_vector([idx], A, demand, unit_vector)
+        #
+        # # Solve the inventory
+        # C = solve_inventory(A, B, f)
+        #
+        # if lcia_matrix is not None:
+        #     if lcia_matrix.ndim != 2 or lcia_matrix.shape[0] != B.shape[1]:
+        #         raise ValueError("Incompatible dimensions between B and lcia_matrix")
+        #
+        #     # Solve the LCA problem to get the LCIA scores
+        #     D = characterize_inventory(C, lcia_matrix)
+        #
+        #     # Sum along the first axis of D to get final result
+        #     D = D.sum(axis=1)
+        #
+        #     # Initialize the new array with zeros for missing data
+        #     E = np.zeros((len(A_index), len(locations), len(impact_categories)))
+        #
+        #     # Populate the result array
+        #     act_locs = [a[-1] for a in rev_A_index.values()]
+        #
+        #     for i, act in enumerate(rev_A_index.values()):
+        #         if act[-1] in act_locs:
+        #             loc_idx = location_to_index[act[-1]]
+        #             E[i, loc_idx, :] = D[i, :]
+        #
+        #     acts_idx = generate_A_indices(
+        #         A_index,
+        #         reverse_classifications,
+        #         lca_results_coords,
+        #     )
+        #
+        #     # Sum over the first axis of D,
+        #     # using acts_idx for advanced indexing
+        #     target[:, v] = E[acts_idx, ...].sum(axis=0)
+        #
+        # else:
+        #     # else, just sum the results of the inventory
+        #     acts_idx = generate_A_indices(
+        #         A_index,
+        #         reverse_classifications,
+        #         lca_results_coords,
+        #     )
+        #     if flows is not None:
+        #
+        #         def transf_flow(f):
+        #             return tuple(f.split(" - "))
+        #
+        #         flows_idx = [int(B_index[transf_flow(f)]) for f in flows]
+        #         C = C[:, flows_idx]
+        #
+        #         # Initialize the new array with zeros for missing data
+        #         E = np.zeros((len(A_index), len(locations), len(flows_idx)))
+        #
+        #         # Populate the result array
+        #         act_locs = [a[-1] for a in rev_A_index.values()]
+        #
+        #         for i, act in enumerate(rev_A_index.values()):
+        #             if act[-1] in act_locs:
+        #                 loc_idx = location_to_index[act[-1]]
+        #                 E[i, loc_idx, :] = C[i, :]
+        #
+        #         target[:, v] = E[acts_idx, ...].sum(axis=0)
+        #     else:
+        #         target[:, v] = C[acts_idx, ...].sum(axis=0)
 
     # Return a dictionary containing the processed LCA data for the given region
     def get_indices():
@@ -637,10 +659,10 @@ def calculate(
 
                     # Try to load LCA matrices for the given model, scenario, and year
                     try:
-                        A, B, A_index, B_index = get_lca_matrices(
-                            self.datapackage, model, scenario, year, data_type=data_type
+                        dp, A_index, B_index = get_lca_matrices(
+                            self.datapackage, model, scenario, year, methods
                         )
-                        B = np.asarray(B.todense())
+
                     except FileNotFoundError:
                         # If LCA matrices can't be loaded, skip to the next iteration
                         print(
@@ -676,12 +698,6 @@ def calculate(
                             flows=flows,
                         )
 
-                    # Fill characterization factor matrix for the given methods
-                    if characterization is True:
-                        self.lcia_matrix = fill_characterization_factors_matrix(
-                            list(B_index.keys()), methods
-                        )
-
                     # Check for activities not found in classifications
                     for act in A_index:
                         if act not in self.classifications:
@@ -700,11 +716,9 @@ def calculate(
                                 self.scenarios,
                                 self.mapping,
                                 self.units,
-                                A,
-                                B,
+                                dp,
                                 A_index,
                                 B_index,
-                                self.lcia_matrix if characterization else None,
                                 self.reverse_classifications,
                                 self.lca_results.coords,
                                 flows,
@@ -736,11 +750,9 @@ def calculate(
                                         self.scenarios,
                                         self.mapping,
                                         self.units,
-                                        A,
-                                        B,
+                                        dp,
                                         A_index,
                                         B_index,
-                                        self.lcia_matrix if characterization else None,
                                         self.reverse_classifications,
                                         self.lca_results.coords,
                                         flows,
diff --git a/requirements.txt b/requirements.txt
index 3cda010..dd2058f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
-numpy
+numpy==1.24.0
 pathlib
 pandas
 scipy
 xarray
 premise
 pyyaml
-pypardiso
+scikit-umfpack
diff --git a/setup.py b/setup.py
index ab08c82..12afebd 100644
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@ def package_files(directory):
     # Might need to change the directory name as well.
     include_package_data=True,
     install_requires=[
-        "numpy",
+        "numpy==1.24.0",
         "pathlib",
         "pandas",
         "xarray",
@@ -49,7 +49,7 @@ def package_files(directory):
         "scipy",
         "premise",
         "pyyaml",
-        "pypardiso",
+        "scikit-umfpack",
     ],
     url="https://github.com/polca/premise",
     description="Scenario-level LCA of energy systems and transition pathways",