Skip to content

Commit

Permalink
[Fix] remove tests/quantization (vllm-project#99)
Browse files Browse the repository at this point in the history
* initial commit

* woops

---------

Co-authored-by: [email protected] <[email protected]>
  • Loading branch information
dbogunowicz and [email protected] authored Jul 1, 2024
1 parent 3db8471 commit cff8a10
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 46 deletions.
13 changes: 0 additions & 13 deletions tests/quantization/__init__.py

This file was deleted.

32 changes: 0 additions & 32 deletions tests/quantization/test_quant_config.py

This file was deleted.

17 changes: 16 additions & 1 deletion tests/test_quantization/test_quant_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.


import pytest
from compressed_tensors.quantization import (
DEFAULT_QUANTIZATION_FORMAT,
Expand Down Expand Up @@ -60,3 +59,19 @@ def test_full_config():
def test_need_config_groups():
    """Constructing a QuantizationScheme with no arguments must be rejected.

    The model requires config groups, so an empty constructor call is
    expected to raise a pydantic ValidationError.
    """
    with pytest.raises(ValidationError):
        QuantizationScheme()


@pytest.mark.parametrize("scheme_name", ["W8A8", "W4A16"])
def test_load_scheme_from_preset(scheme_name: str):
    """A preset name given in ``config_groups`` expands to a full scheme.

    Passing ``{preset_name: targets}`` to QuantizationConfig should resolve
    the preset string into a concrete QuantizationScheme whose targets match
    the ones supplied.
    """
    expected_targets = ["Linear"]
    config = QuantizationConfig(config_groups={scheme_name: expected_targets})

    resolved = config.config_groups.get(scheme_name)
    assert resolved is not None
    assert isinstance(resolved, QuantizationScheme)
    assert resolved.targets == expected_targets

0 comments on commit cff8a10

Please sign in to comment.