{"dataType":"CVE_RECORD","dataVersion":"5.2","cveMetadata":{"cveId":"CVE-2026-34070","assignerOrgId":"a0819718-46f1-4df5-94e2-005712e83aaa","state":"PUBLISHED","assignerShortName":"GitHub_M","dateReserved":"2026-03-25T16:21:40.867Z","datePublished":"2026-03-31T02:01:49.320Z","dateUpdated":"2026-03-31T18:04:59.283Z"},"containers":{"cna":{"title":"LangChain Core has Path Traversal vulnerabilities in legacy `load_prompt` functions","problemTypes":[{"descriptions":[{"cweId":"CWE-22","lang":"en","description":"CWE-22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')","type":"CWE"}]}],"metrics":[{"cvssV3_1":{"attackComplexity":"LOW","attackVector":"NETWORK","availabilityImpact":"NONE","baseScore":7.5,"baseSeverity":"HIGH","confidentialityImpact":"HIGH","integrityImpact":"NONE","privilegesRequired":"NONE","scope":"UNCHANGED","userInteraction":"NONE","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N","version":"3.1"}}],"references":[{"name":"https://github.com/langchain-ai/langchain/security/advisories/GHSA-qh6h-p6c9-ff54","tags":["x_refsource_CONFIRM"],"url":"https://github.com/langchain-ai/langchain/security/advisories/GHSA-qh6h-p6c9-ff54"},{"name":"https://github.com/langchain-ai/langchain/commit/27add913474e01e33bededf4096151130ba0d47c","tags":["x_refsource_MISC"],"url":"https://github.com/langchain-ai/langchain/commit/27add913474e01e33bededf4096151130ba0d47c"},{"name":"https://github.com/langchain-ai/langchain/releases/tag/langchain-core==1.2.22","tags":["x_refsource_MISC"],"url":"https://github.com/langchain-ai/langchain/releases/tag/langchain-core==1.2.22"}],"affected":[{"vendor":"langchain-ai","product":"langchain","versions":[{"version":"< 1.2.22","status":"affected"}]}],"providerMetadata":{"orgId":"a0819718-46f1-4df5-94e2-005712e83aaa","shortName":"GitHub_M","dateUpdated":"2026-03-31T02:01:49.320Z"},"descriptions":[{"lang":"en","value":"LangChain is a framework for building agents and LLM-powered applications. Prior to version 1.2.22, multiple functions in langchain_core.prompts.loading read files from paths embedded in deserialized config dicts without validating against directory traversal or absolute path injection. When an application passes user-influenced prompt configurations to load_prompt() or load_prompt_from_config(), an attacker can read arbitrary files on the host filesystem, constrained only by file-extension checks (.txt for templates, .json/.yaml for examples). This issue has been patched in version 1.2.22."}],"source":{"advisory":"GHSA-qh6h-p6c9-ff54","discovery":"UNKNOWN"}},"adp":[{"references":[{"url":"https://github.com/langchain-ai/langchain/security/advisories/GHSA-qh6h-p6c9-ff54","tags":["exploit"]}],"metrics":[{"other":{"type":"ssvc","content":{"timestamp":"2026-03-31T15:17:33.597003Z","id":"CVE-2026-34070","options":[{"Exploitation":"poc"},{"Automatable":"yes"},{"Technical Impact":"partial"}],"role":"CISA Coordinator","version":"2.0.3"}}}],"title":"CISA ADP Vulnrichment","providerMetadata":{"orgId":"134c704f-9b21-4f2e-91b3-4a467353bcc0","shortName":"CISA-ADP","dateUpdated":"2026-03-31T18:04:59.283Z"}}]}}