{"api_version":"1","generated_at":"2026-05-12T22:03:39+00:00","cve":"CVE-2026-44222","urls":{"html":"https://cve.report/CVE-2026-44222","api":"https://cve.report/api/cve/CVE-2026-44222.json","docs":"https://cve.report/api","cve_org":"https://www.cve.org/CVERecord?id=CVE-2026-44222","nvd":"https://nvd.nist.gov/vuln/detail/CVE-2026-44222"},"summary":{"title":"vLLM: Remote DoS via Special-Token Placeholders","description":"vLLM is an inference and serving engine for large language models (LLMs). From 0.6.1 to before 0.20.0, there is a Token Injection vulnerability in vLLM’s multimodal processing. Unauthenticated, text-only prompts that spell special tokens are interpreted as control. Image and video placeholder sequences supplied without matching data cause vLLM to index into empty grids during input-position computation, raising an unhandled IndexError and terminating the worker or degrading availability. Multimodal paths that rely on image_grid_thw/video_grid_thw are affected. This vulnerability is fixed in 0.20.0.","state":"PUBLISHED","assigner":"GitHub_M","published_at":"2026-05-12 20:16:43","updated_at":"2026-05-12 20:16:43"},"problem_types":["CWE-129","CWE-129 CWE-129: Improper Validation of Array Index"],"metrics":[{"version":"3.1","source":"security-advisories@github.com","type":"Secondary","score":"6.5","severity":"MEDIUM","vector":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","data":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","baseScore":6.5,"baseSeverity":"MEDIUM","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"LOW","userInteraction":"NONE","scope":"UNCHANGED","confidentialityImpact":"NONE","integrityImpact":"NONE","availabilityImpact":"HIGH"}},{"version":"3.1","source":"CNA","type":"DECLARED","score":"6.5","severity":"MEDIUM","vector":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","data":{"attackComplexity":"LOW","attackVector":"NETWORK","availabilityImpact":"HIGH","baseScore":6.5,"baseSeverity":"MEDIUM","confidentialityImpact":"NONE","integrityImpact":"NONE","privilegesRequired":"LOW","scope":"UNCHANGED","userInteraction":"NONE","vectorString":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","version":"3.1"}}],"references":[{"url":"https://github.com/vllm-project/vllm/issues/32656","name":"https://github.com/vllm-project/vllm/issues/32656","refsource":"security-advisories@github.com","tags":[],"title":"","mime":"","httpstatus":"","archivestatus":"0"},{"url":"https://github.com/vllm-project/vllm/security/advisories/GHSA-hpv8-x276-m59f","name":"https://github.com/vllm-project/vllm/security/advisories/GHSA-hpv8-x276-m59f","refsource":"security-advisories@github.com","tags":[],"title":"","mime":"","httpstatus":"","archivestatus":"0"},{"url":"https://www.cve.org/CVERecord?id=CVE-2026-44222","name":"CVE Program record","refsource":"CVE.ORG","tags":["canonical"]},{"url":"https://nvd.nist.gov/vuln/detail/CVE-2026-44222","name":"NVD vulnerability detail","refsource":"NVD","tags":["canonical","analysis"]}],"affected":[{"source":"CNA","vendor":"vllm-project","product":"vllm","version":"affected >= 0.6.1, < 0.20.0","platforms":[]}],"timeline":[],"solutions":[],"workarounds":[],"exploits":[],"credits":[],"nvd_cpes":[],"vendor_comments":[],"enrichments":{"kev":null,"epss":null,"legacy_qids":[]},"source_records":{"cve_program":{"containers":{"cna":{"affected":[{"product":"vllm","vendor":"vllm-project","versions":[{"status":"affected","version":">= 0.6.1, < 0.20.0"}]}],"descriptions":[{"lang":"en","value":"vLLM is an inference and serving engine for large language models (LLMs). From 0.6.1 to before 0.20.0, there is a Token Injection vulnerability in vLLM’s multimodal processing. Unauthenticated, text-only prompts that spell special tokens are interpreted as control. Image and video placeholder sequences supplied without matching data cause vLLM to index into empty grids during input-position computation, raising an unhandled IndexError and terminating the worker or degrading availability. Multimodal paths that rely on image_grid_thw/video_grid_thw are affected. This vulnerability is fixed in 0.20.0."}],"metrics":[{"cvssV3_1":{"attackComplexity":"LOW","attackVector":"NETWORK","availabilityImpact":"HIGH","baseScore":6.5,"baseSeverity":"MEDIUM","confidentialityImpact":"NONE","integrityImpact":"NONE","privilegesRequired":"LOW","scope":"UNCHANGED","userInteraction":"NONE","vectorString":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","version":"3.1"}}],"problemTypes":[{"descriptions":[{"cweId":"CWE-129","description":"CWE-129: Improper Validation of Array Index","lang":"en","type":"CWE"}]}],"providerMetadata":{"dateUpdated":"2026-05-12T19:57:25.336Z","orgId":"a0819718-46f1-4df5-94e2-005712e83aaa","shortName":"GitHub_M"},"references":[{"name":"https://github.com/vllm-project/vllm/security/advisories/GHSA-hpv8-x276-m59f","tags":["x_refsource_CONFIRM"],"url":"https://github.com/vllm-project/vllm/security/advisories/GHSA-hpv8-x276-m59f"},{"name":"https://github.com/vllm-project/vllm/issues/32656","tags":["x_refsource_MISC"],"url":"https://github.com/vllm-project/vllm/issues/32656"}],"source":{"advisory":"GHSA-hpv8-x276-m59f","discovery":"UNKNOWN"},"title":"vLLM: Remote DoS via Special-Token Placeholders"}},"cveMetadata":{"assignerOrgId":"a0819718-46f1-4df5-94e2-005712e83aaa","assignerShortName":"GitHub_M","cveId":"CVE-2026-44222","datePublished":"2026-05-12T19:57:25.336Z","dateReserved":"2026-05-05T15:42:40.518Z","dateUpdated":"2026-05-12T19:57:25.336Z","state":"PUBLISHED"},"dataType":"CVE_RECORD","dataVersion":"5.2"},"nvd":{"publishedDate":"2026-05-12 20:16:43","lastModifiedDate":"2026-05-12 20:16:43","problem_types":["CWE-129","CWE-129 CWE-129: Improper Validation of Array Index"],"metrics":{"cvssMetricV31":[{"source":"security-advisories@github.com","type":"Secondary","cvssData":{"version":"3.1","vectorString":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H","baseScore":6.5,"baseSeverity":"MEDIUM","attackVector":"NETWORK","attackComplexity":"LOW","privilegesRequired":"LOW","userInteraction":"NONE","scope":"UNCHANGED","confidentialityImpact":"NONE","integrityImpact":"NONE","availabilityImpact":"HIGH"},"exploitabilityScore":2.8,"impactScore":3.6}]},"configurations":[]},"legacy_mitre":{"record":{"CveYear":"2026","CveId":"44222","Ordinal":"1","Title":"vLLM: Remote DoS via Special-Token Placeholders","CVE":"CVE-2026-44222","Year":"2026"},"notes":[{"CveYear":"2026","CveId":"44222","Ordinal":"1","NoteData":"vLLM is an inference and serving engine for large language models (LLMs). From 0.6.1 to before 0.20.0, there is a Token Injection vulnerability in vLLM’s multimodal processing. Unauthenticated, text-only prompts that spell special tokens are interpreted as control. Image and video placeholder sequences supplied without matching data cause vLLM to index into empty grids during input-position computation, raising an unhandled IndexError and terminating the worker or degrading availability. Multimodal paths that rely on image_grid_thw/video_grid_thw are affected. This vulnerability is fixed in 0.20.0.","Type":"Description","Title":"vLLM: Remote DoS via Special-Token Placeholders"}]}}}