Testing HuggingFace ALBERT Model Integration in ColossalAI
This test suite validates Hugging Face ALBERT models within the ColossalAI framework. For each ALBERT variant registered in the model zoo, it traces the model with ColossalAI's FX tracer and checks that the traced module reproduces the original model's outputs, while skipping incompatible PyTorch versions and clearing caches between runs to manage memory.
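At its core, each check follows a trace-and-compare pattern: trace the model into a torch.fx graph, run both the eager model and the traced module on the same inputs, and assert that the outputs match. The sketch below illustrates that pattern using the generic Hugging Face FX tracer; the helper name trace_and_compare, the tiny AlbertConfig values, and the tolerance are illustrative assumptions, since the actual trace_model_and_compare_output helper relies on ColossalAI's own tracer and comparison logic.

import torch
from transformers import AlbertConfig, AlbertModel
from transformers.utils.fx import symbolic_trace

def trace_and_compare(model, inputs):
    # Hypothetical stand-in for trace_model_and_compare_output:
    # trace to a GraphModule, then check traced outputs against eager outputs.
    model.eval()
    traced = symbolic_trace(model, input_names=list(inputs.keys()))
    with torch.no_grad():
        ref = model(**inputs)
        out = traced(**inputs)
    for key, value in ref.items():
        if torch.is_tensor(value):
            assert torch.allclose(value, out[key], atol=1e-5), f"mismatch on {key}"

# A deliberately small configuration keeps the check fast.
config = AlbertConfig(embedding_size=32, hidden_size=64, num_hidden_layers=2,
                      num_attention_heads=4, intermediate_size=128)
inputs = {
    "input_ids": torch.randint(0, config.vocab_size, (2, 16)),
    "attention_mask": torch.ones(2, 16, dtype=torch.long),
}
trace_and_compare(AlbertModel(config), inputs)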
Source: tests/test_fx/test_tracer/test_hf_model/test_hf_albert.py (hpcaitech/colossalai)
import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version

from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo

BATCH_SIZE = 2
SEQ_LENGTH = 16


@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 1.12.0")
@clear_cache_before_run()
def test_albert():
    sub_registry = model_zoo.get_sub_registry("transformers_albert")

    # Iterate over every ALBERT variant registered in the model zoo.
    for name, (model_fn, data_gen_fn, _, _, _) in sub_registry.items():
        model = model_fn()

        # TODO: support the following models
        # 1. "AlbertForPreTraining"
        # as they are not supported, let's skip them
        if model.__class__.__name__ in ["AlbertForPreTraining"]:
            continue

        trace_model_and_compare_output(model, data_gen_fn)


if __name__ == "__main__":
    test_albert()
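Each registry entry pairs a model constructor with a data generator. The generator below is a hypothetical sketch of what such a data_gen_fn might return, reusing the BATCH_SIZE and SEQ_LENGTH constants from the test and ALBERT's default vocabulary size of 30000; the real generators live in tests/kit/model_zoo and may differ.

import torch

BATCH_SIZE = 2
SEQ_LENGTH = 16

def data_gen_fn():
    # Keyword arguments passed to the model's forward call during tracing.
    # Vocabulary size 30000 matches AlbertConfig's default (an assumption here).
    return dict(
        input_ids=torch.randint(0, 30000, (BATCH_SIZE, SEQ_LENGTH), dtype=torch.long),
        attention_mask=torch.ones(BATCH_SIZE, SEQ_LENGTH, dtype=torch.long),
        token_type_ids=torch.zeros(BATCH_SIZE, SEQ_LENGTH, dtype=torch.long),
    )

The test can be run directly with python test_hf_albert.py, or through pytest, which also honors the version-based skip marker.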