Files changed (1)
  1. OpenGVLab_InternVL-Chat-V1-5.json +77 -0
OpenGVLab_InternVL-Chat-V1-5.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "bomFormat": "CycloneDX",
+   "specVersion": "1.6",
+   "serialNumber": "urn:uuid:0fd73414-9e65-47c5-8598-9745c0e73210",
+   "version": 1,
+   "metadata": {
+     "timestamp": "2025-06-05T09:41:23.012895+00:00",
+     "component": {
+       "type": "machine-learning-model",
+       "bom-ref": "OpenGVLab/InternVL-Chat-V1-5-c62ebeb8-2b6a-572f-945a-ebc349c329f4",
+       "name": "OpenGVLab/InternVL-Chat-V1-5",
+       "externalReferences": [
+         {
+           "url": "https://huggingface.co/OpenGVLab/InternVL-Chat-V1-5",
+           "type": "documentation"
+         }
+       ],
+       "modelCard": {
+         "modelParameters": {
+           "task": "image-text-to-text",
+           "architectureFamily": "internvl_chat",
+           "modelArchitecture": "InternVLChatModel"
+         },
+         "properties": [
+           {
+             "name": "library_name",
+             "value": "transformers"
+           },
+           {
+             "name": "base_model",
+             "value": "OpenGVLab/InternViT-6B-448px-V1-5, internlm/internlm2-chat-20b"
+           },
+           {
+             "name": "base_model_relation",
+             "value": "merge"
+           }
+         ]
+       },
+       "authors": [
+         {
+           "name": "OpenGVLab"
+         }
+       ],
+       "licenses": [
+         {
+           "license": {
+             "id": "MIT",
+             "url": "https://spdx.org/licenses/MIT.html"
+           }
+         }
+       ],
+       "description": "<p align=\"center\"><img src=\"https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/D60YzQBIzvoCvLRp2gZ0A.jpeg\" alt=\"Image Description\" width=\"300\" height=\"300\"></p> > _Two interns holding hands, symbolizing the integration of InternViT and InternLM._ We introduce InternVL 1.5, an open-source multimodal large language model (MLLM) to bridge the capability gap between open-source and proprietary commercial models in multimodal understanding. We introduce three simple designs: 1. **Strong Vision Encoder:** we explored a continuous learning strategy for the large-scale vision foundation model---InternViT-6B, boosting its visual understanding capabilities and enabling it to be transferred and reused in different LLMs. 2. **Dynamic High-Resolution:** we divide images into 1 to 40 tiles of 448 \u00d7 448 pixels according to the aspect ratio and resolution of the input images, which supports up to 4K resolution input during inference. 3. **High-Quality Bilingual Dataset:** we carefully collected a high-quality bilingual dataset that covers common scenes and document images, and annotated them with English and Chinese question-answer pairs, significantly enhancing performance in OCR- and Chinese-related tasks.",
+       "tags": [
+         "transformers",
+         "tensorboard",
+         "safetensors",
+         "internvl_chat",
+         "feature-extraction",
+         "internvl",
+         "custom_code",
+         "image-text-to-text",
+         "conversational",
+         "multilingual",
+         "arxiv:2312.14238",
+         "arxiv:2404.16821",
+         "arxiv:2410.16261",
+         "arxiv:2412.05271",
+         "base_model:OpenGVLab/InternViT-6B-448px-V1-5",
+         "base_model:merge:OpenGVLab/InternViT-6B-448px-V1-5",
+         "base_model:internlm/internlm2-chat-20b",
+         "base_model:merge:internlm/internlm2-chat-20b",
+         "license:mit",
+         "region:us"
+       ]
+     }
+   }
+ }
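
The "Dynamic High-Resolution" design mentioned in the model description divides each input image into between 1 and 40 tiles of 448 × 448 pixels, chosen from the image's aspect ratio and resolution. As a rough illustration, here is a minimal Python sketch of that tiling step, assuming PIL is available; the grid search, tie-breaking, and thumbnail handling below are simplifications for illustration, not the model's exact preprocessing code.

```python
# Minimal sketch of dynamic high-resolution tiling as described above:
# pick a cols x rows grid of 448x448 tiles (1..40 tiles total) whose aspect
# ratio is closest to the input image, then resize and crop into tiles.
# Grid enumeration, tie-breaking, and the thumbnail tile are assumptions.
from PIL import Image

TILE = 448

def candidate_grids(min_tiles=1, max_tiles=40):
    # All (cols, rows) grids whose tile count lies in [min_tiles, max_tiles].
    grids = set()
    for n in range(min_tiles, max_tiles + 1):
        for cols in range(1, n + 1):
            if n % cols == 0:
                grids.add((cols, n // cols))
    return sorted(grids, key=lambda g: g[0] * g[1])

def best_grid(width, height, grids):
    # Grid whose cols/rows ratio is closest to the image's aspect ratio.
    target = width / height
    return min(grids, key=lambda g: abs(g[0] / g[1] - target))

def dynamic_tiles(image, min_tiles=1, max_tiles=40, add_thumbnail=True):
    cols, rows = best_grid(image.width, image.height,
                           candidate_grids(min_tiles, max_tiles))
    resized = image.resize((cols * TILE, rows * TILE))
    tiles = [
        resized.crop((c * TILE, r * TILE, (c + 1) * TILE, (r + 1) * TILE))
        for r in range(rows)
        for c in range(cols)
    ]
    if add_thumbnail and len(tiles) > 1:
        tiles.append(image.resize((TILE, TILE)))  # global thumbnail view
    return tiles

# Example: with this sketch, a 16:9 4K frame (3840x2160) selects a 7x4 grid,
# i.e. 28 tiles of 448x448 plus one thumbnail tile.
# img = Image.open("example.jpg").convert("RGB")
# print(len(dynamic_tiles(img)))
```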