Diffstat (limited to 'dcae-cli/dcae_cli/commands')
 dcae-cli/dcae_cli/commands/tests/test_component_cmd.py   |  2 ++
 dcae-cli/dcae_cli/commands/tests/test_data_format_cmd.py | 16 ++++++++++------
 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/dcae-cli/dcae_cli/commands/tests/test_component_cmd.py b/dcae-cli/dcae_cli/commands/tests/test_component_cmd.py
index a1e18d1..ea27068 100644
--- a/dcae-cli/dcae_cli/commands/tests/test_component_cmd.py
+++ b/dcae-cli/dcae_cli/commands/tests/test_component_cmd.py
@@ -26,6 +26,7 @@ import os
 import json
 from click.testing import CliRunner
 import time
+import pytest
 
 from dcae_cli.cli import cli
 from dcae_cli.catalog import MockCatalog
@@ -102,6 +103,7 @@ def test_comp_docker(obj=None):
     assert comp_model_spec == json.loads(spec_str)
 
 
+@pytest.mark.skip(reason="This is not a pure unit test. Need a way to set up dependencies and trigger in the appropriate stages of testing.")
 def test_comp_cdap(obj=None):
     """
     This is not a unit test. It is bigger than that. It does a full "workflow" test:
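The marker added above is standard pytest: @pytest.mark.skip excludes a test unconditionally at collection time and surfaces the reason string in the report. A minimal sketch of the same pattern, alongside an opt-in variant via pytest.mark.skipif (the DCAE_CLI_INTEGRATION environment variable below is a hypothetical illustration, not something the dcae-cli repo defines):

import os

import pytest


@pytest.mark.skip(reason="Not a pure unit test; needs external dependencies.")
def test_full_workflow():
    ...


# Opt-in variant: runs only when the (hypothetical) DCAE_CLI_INTEGRATION
# environment variable is set; skipped everywhere else.
@pytest.mark.skipif(os.environ.get("DCAE_CLI_INTEGRATION") != "1",
                    reason="set DCAE_CLI_INTEGRATION=1 to run integration tests")
def test_full_workflow_opt_in():
    ...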
diff --git a/dcae-cli/dcae_cli/commands/tests/test_data_format_cmd.py b/dcae-cli/dcae_cli/commands/tests/test_data_format_cmd.py
index b8402f6..8ef4c9b 100644
--- a/dcae-cli/dcae_cli/commands/tests/test_data_format_cmd.py
+++ b/dcae-cli/dcae_cli/commands/tests/test_data_format_cmd.py
@@ -99,18 +99,22 @@ def test_basic():
 
     generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
     cmd = "data_format generate --keywords name:1.0.2 {:} ".format(generate_dir).split()
-    out_str = runner.invoke(cli, cmd, obj=obj).output
-    assert '{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n' == out_str
+    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
+    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n')
+    assert actual == expected
 
     generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
     cmd = "data_format generate name:1.0.2 {:} ".format(generate_dir).split()
-    out_str = runner.invoke(cli, cmd, obj=obj).output
-    assert '{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n' == out_str
+    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
+    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n'
+    )
+    assert actual == expected
 
     generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir', 'ex1.json')
     cmd = "data_format generate name:1.0.2 {:} ".format(generate_dir).split()
-    out_str = runner.invoke(cli, cmd, obj=obj).output
-    assert '{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "additionalproperties": true, \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }\n }, \n "required": [\n "foobar"\n ], \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n' == out_str
+    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
+    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "additionalproperties": true, \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }\n }, \n "required": [\n "foobar"\n ], \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n')
+    assert actual == expected
 
 
 if __name__ == '__main__':
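The data-format test changes above swap exact string equality for structural equality: parsing both the CLI output and the expected literal with json.loads makes the assertions independent of key order, indentation, and the trailing-space separators that older json.dumps defaults emit. A minimal self-contained sketch of the pattern, using only the standard library:

import json

def test_structural_json_comparison():
    # The same document, serialized with different whitespace and key order.
    produced = '{"self": {"name": "name", "version": "1.0.2"}, "dataformatversion": "1.0.0"}'
    expected = '{\n  "dataformatversion": "1.0.0",\n  "self": {"version": "1.0.2", "name": "name"}\n}'
    assert produced != expected                          # raw strings differ...
    assert json.loads(produced) == json.loads(expected)  # ...but parsed values match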