# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.

# -*- coding: utf-8 -*-
'''
Tests data_format CLI commands
'''
import os
import json

import pytest
from click.testing import CliRunner

from dcae_cli.cli import cli
from dcae_cli.catalog import MockCatalog


TEST_DIR = os.path.dirname(__file__)


def _get_spec(path):
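    '''Loads a JSON data format spec from disk so it can be compared with CLI output'''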
    with open(path) as file:
        return json.load(file)


def test_basic(mock_cli_config, mock_db_url, tmpdir):
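    '''Exercises the data_format add, list, show, and generate commands end to end'''
    # obj is passed to CliRunner.invoke and becomes the Click context object (ctx.obj);
    # the commands pull the mock catalog and the active user config from it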
    obj = {'catalog': MockCatalog(purge_existing=True, db_name='dcae_cli.test.db',
                                  enforce_image=False, db_url=mock_db_url),
           'config': {'user': 'test-user'}}

    runner = CliRunner()
    spec_file = os.path.join(TEST_DIR, 'mocked_components', 'model', 'int-class.format.json')
    cmd = "data_format add {:}".format(spec_file).split()

    # succeed the first time
    result = runner.invoke(cli, cmd, obj=obj)

    assert result.exit_code == 0

    # adding a duplicate is an error
    result = runner.invoke(cli, cmd, obj=obj)
    assert result.exit_code == 1
    assert 'exists' in result.output.lower()

    # allow updates
    cmd = "data_format add --update {:}".format(spec_file).split()
    result = runner.invoke(cli, cmd, obj=obj)
    assert result.exit_code == 0


    # light test of the data_format list command
    cmd = 'data_format list'.split()
    df_spec = _get_spec(spec_file)
    df_name = df_spec['self']['name']
    assert df_name in runner.invoke(cli, cmd, obj=obj).output


    # light test of the data_format show command
    cmd = "data_format show {:}".format(df_name).split()
    spec_str = runner.invoke(cli, cmd, obj=obj).output
    assert df_spec == json.loads(spec_str)

    # test of generate: error paths first (missing dir, empty dir, bad JSON), then successful generation
    bad_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'baddir')
    cmd = "data_format generate --keywords name:1.0.2 {:}".format(bad_dir).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "does not exist" in err_str

    empty_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'emptydir')
    # the empty directory cannot be checked in, so create it if it is missing
    os.makedirs(empty_dir, exist_ok=True)
    cmd = "data_format generate --keywords name:1.0.2 {:}".format(empty_dir).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "No JSON files found" in err_str

    bad_json = os.path.join(TEST_DIR, 'mocked_components', 'model', 'badjson')
    cmd = "data_format generate --keywords name:1.0.2 {:}".format(bad_json).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "Bad JSON file" in err_str

    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
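    # with --keywords, each generated string property carries placeholder constraint
    # fields (maxLength, minLength, pattern) alongside description and type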
    cmd = "data_format generate --keywords name:1.0.2 {:}".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n    "dataformatversion": "1.0.0", \n    "jsonschema": {\n        "$schema": "http://json-schema.org/draft-04/schema#", \n        "description": "", \n        "properties": {\n            "foobar": {\n                "description": "", \n                "maxLength": 0, \n                "minLength": 0, \n                "pattern": "", \n                "type": "string"\n            }, \n            "foobar2": {\n                "description": "", \n                "maxLength": 0, \n                "minLength": 0, \n                "pattern": "", \n                "type": "string"\n            }\n        }, \n        "type": "object"\n    }, \n    "self": {\n        "description": "", \n        "name": "name", \n        "version": "1.0.2"\n    }\n}\n')
    assert actual == expected

    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
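    # without --keywords, only description and type are generated for each property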
    cmd = "data_format generate name:1.0.2 {:}".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n    "dataformatversion": "1.0.0", \n    "jsonschema": {\n        "$schema": "http://json-schema.org/draft-04/schema#", \n        "description": "", \n        "properties": {\n            "foobar": {\n                "description": "", \n                "type": "string"\n            }, \n            "foobar2": {\n                "description": "", \n                "type": "string"\n            }\n        }, \n        "type": "object"\n    }, \n    "self": {\n        "description": "", \n        "name": "name", \n        "version": "1.0.2"\n    }\n}\n'
            )
    assert actual == expected

    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir', 'ex1.json')
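    # generate also accepts a single sample JSON file instead of a directory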
    cmd = "data_format generate name:1.0.2 {:}".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n    "dataformatversion": "1.0.0", \n    "jsonschema": {\n        "$schema": "http://json-schema.org/draft-04/schema#", \n        "additionalproperties": true, \n        "description": "", \n        "properties": {\n            "foobar": {\n                "description": "", \n                "type": "string"\n            }\n        }, \n        "required": [\n            "foobar"\n        ], \n        "type": "object"\n    }, \n    "self": {\n        "description": "", \n        "name": "name", \n        "version": "1.0.2"\n    }\n}\n')
    assert actual == expected


if __name__ == '__main__':
    # Test area
    pytest.main([__file__])