{"dataType":"CVE_RECORD","dataVersion":"5.1","cveMetadata":{"cveId":"CVE-2024-34359","assignerOrgId":"a0819718-46f1-4df5-94e2-005712e83aaa","state":"PUBLISHED","assignerShortName":"GitHub_M","dateReserved":"2024-05-02T06:36:32.439Z","datePublished":"2024-05-10T17:07:18.850Z","dateUpdated":"2024-08-02T02:51:10.739Z"},"containers":{"cna":{"title":"llama-cpp-python vulnerable to Remote Code Execution by Server-Side Template Injection in Model Metadata","problemTypes":[{"descriptions":[{"cweId":"CWE-76","lang":"en","description":"CWE-76: Improper Neutralization of Equivalent Special Elements","type":"CWE"}]}],"metrics":[{"cvssV3_1":{"attackComplexity":"LOW","attackVector":"NETWORK","availabilityImpact":"HIGH","baseScore":9.7,"baseSeverity":"CRITICAL","confidentialityImpact":"HIGH","integrityImpact":"HIGH","privilegesRequired":"NONE","scope":"CHANGED","userInteraction":"REQUIRED","vectorString":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:H/I:H/A:H","version":"3.1"}}],"references":[{"name":"https://github.com/abetlen/llama-cpp-python/security/advisories/GHSA-56xg-wfcc-g829","tags":["x_refsource_CONFIRM"],"url":"https://github.com/abetlen/llama-cpp-python/security/advisories/GHSA-56xg-wfcc-g829"},{"name":"https://github.com/abetlen/llama-cpp-python/commit/b454f40a9a1787b2b5659cd2cb00819d983185df","tags":["x_refsource_MISC"],"url":"https://github.com/abetlen/llama-cpp-python/commit/b454f40a9a1787b2b5659cd2cb00819d983185df"}],"affected":[{"vendor":"abetlen","product":"llama-cpp-python","versions":[{"version":">= 0.2.30, <= 0.2.71","status":"affected"}]}],"providerMetadata":{"orgId":"a0819718-46f1-4df5-94e2-005712e83aaa","shortName":"GitHub_M","dateUpdated":"2024-05-10T17:07:18.850Z"},"descriptions":[{"lang":"en","value":"llama-cpp-python is the Python bindings for llama.cpp. `llama-cpp-python` depends on class `Llama` in `llama.py` to load `.gguf` llama.cpp or Latency Machine Learning Models. The `__init__` constructor built in the `Llama` takes several parameters to configure the loading and running of the model. Other than `NUMA, LoRa settings`, `loading tokenizers,` and `hardware settings`, `__init__` also loads the `chat template` from targeted `.gguf` 's Metadata and then passes it to `llama_chat_format.Jinja2ChatFormatter.to_chat_handler()` to construct the `self.chat_handler` for this model. Nevertheless, `Jinja2ChatFormatter` parses the `chat template` within the Metadata with sandbox-less `jinja2.Environment`, which is furthermore rendered in `__call__` to construct the `prompt` of interaction. This allows `jinja2` Server Side Template Injection which leads to remote code execution by a carefully constructed payload."}],"source":{"advisory":"GHSA-56xg-wfcc-g829","discovery":"UNKNOWN"}},"adp":[{"affected":[{"vendor":"abetlen","product":"llama-cpp-python","cpes":["cpe:2.3:a:abetlen:llama-cpp-python:*:*:*:*:*:*:*:*"],"defaultStatus":"unknown","versions":[{"version":"0.2.30","status":"affected","lessThanOrEqual":"0.2.71","versionType":"custom"}]}],"metrics":[{"other":{"type":"ssvc","content":{"timestamp":"2024-05-15T19:35:24.408358Z","id":"CVE-2024-34359","options":[{"Exploitation":"poc"},{"Automatable":"no"},{"Technical Impact":"total"}],"role":"CISA Coordinator","version":"2.0.3"}}}],"title":"CISA ADP Vulnrichment","providerMetadata":{"orgId":"134c704f-9b21-4f2e-91b3-4a467353bcc0","shortName":"CISA-ADP","dateUpdated":"2024-06-06T18:29:15.313Z"}},{"providerMetadata":{"orgId":"af854a3a-2127-422b-91ae-364da2661108","shortName":"CVE","dateUpdated":"2024-08-02T02:51:10.739Z"},"title":"CVE Program Container","references":[{"name":"https://github.com/abetlen/llama-cpp-python/security/advisories/GHSA-56xg-wfcc-g829","tags":["x_refsource_CONFIRM","x_transferred"],"url":"https://github.com/abetlen/llama-cpp-python/security/advisories/GHSA-56xg-wfcc-g829"},{"name":"https://github.com/abetlen/llama-cpp-python/commit/b454f40a9a1787b2b5659cd2cb00819d983185df","tags":["x_refsource_MISC","x_transferred"],"url":"https://github.com/abetlen/llama-cpp-python/commit/b454f40a9a1787b2b5659cd2cb00819d983185df"}]}]}}