data/vuln_en/langchain/CVE-2023-38860.yaml
info:
  name: langchain
  cve: CVE-2023-38860
  summary: LangChain vulnerable to arbitrary code execution
  details: |
    An issue in LangChain prior to v0.0.247 allows a remote attacker to execute arbitrary code via the prompt parameter.
  cvss: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H
  severity: CRITICAL
  security_advise: |
    1. Upgrade to langchain>=0.0.247
    2. Review and restrict the use of the prompt parameter in API calls
    3. Implement input validation to prevent malicious code injection (see the commented sketch below)
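# A minimal, hypothetical sketch of advisory steps 2-3, kept as YAML comments
# so this record stays parseable. The names below (SUSPICIOUS, validate_prompt)
# are illustrative, not LangChain APIs: the idea is to reject code-like prompt
# input before it reaches any chain that may execute model output.
#
#   import re
#
#   SUSPICIOUS = re.compile(r"__import__|exec\(|eval\(|os\.system|subprocess")
#
#   def validate_prompt(user_text: str) -> str:
#       """Hypothetical pre-filter: refuse prompt text with code-like tokens."""
#       if SUSPICIOUS.search(user_text):
#           raise ValueError("prompt rejected: possible code-injection payload")
#       return user_text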
rule: version < "0.0.247"
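# The `rule` above is a version predicate; a minimal runtime sketch of the
# same check (assuming the third-party `packaging` library, which this record
# does not mention) might be:
#
#   from importlib.metadata import version
#   from packaging.version import Version
#
#   # Refuse to start if the installed langchain predates the fixed release.
#   if Version(version("langchain")) < Version("0.0.247"):
#       raise RuntimeError(
#           "langchain < 0.0.247 is affected by CVE-2023-38860; upgrade")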
references:
  - https://nvd.nist.gov/vuln/detail/CVE-2023-38860
  - https://github.com/hwchase17/langchain/issues/7641
  - https://github.com/langchain-ai/langchain/issues/7641
  - https://github.com/langchain-ai/langchain/pull/8092
  - https://github.com/langchain-ai/langchain/pull/8425
  - https://github.com/langchain-ai/langchain/commit/d353d668e4b0514122a443cef91de7f76fea4245
  - https://github.com/langchain-ai/langchain/commit/fab24457bcf8ede882abd11419769c92bc4e7751
  - https://github.com/hwchase17/langchain
  - https://github.com/pypa/advisory-database/tree/main/vulns/langchain/PYSEC-2023-145.yaml