author     ys9693 <ys9693@att.com>                        2020-01-19 13:50:02 +0200
committer  Ofir Sonsino <ofir.sonsino@intl.att.com>       2020-01-22 12:33:31 +0000
commit     16a9fce0e104a38371a9e5a567ec611ae3fc7f33 (patch)
tree       03a2aff3060ddb5bc26a90115805a04becbaffc9 /asdctool/src/main/resources
parent     aa83a2da4f911c3ac89318b8e9e8403b072942e1 (diff)
Catalog alignment
Issue-ID: SDC-2724
Signed-off-by: ys9693 <ys9693@att.com>
Change-Id: I52b4aacb58cbd432ca0e1ff7ff1f7dd52099c6fe
Diffstat (limited to 'asdctool/src/main/resources')
29 files changed, 176 insertions, 1721 deletions
diff --git a/asdctool/src/main/resources/application-context.xml b/asdctool/src/main/resources/application-context.xml index c9a13df44a..252b951895 100644 --- a/asdctool/src/main/resources/application-context.xml +++ b/asdctool/src/main/resources/application-context.xml @@ -1,11 +1,5 @@ <?xml version="1.0" encoding="UTF-8"?> <beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xmlns:util="http://www.springframework.org/schema/util" xsi:schemaLocation=" - http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd - http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.0.xsd"> - - - <util:properties id="elasticsearchConfig" location="file:${config.home}/elasticsearch.yml" /> - + http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd"> </beans> diff --git a/asdctool/src/main/resources/config/configuration.yaml b/asdctool/src/main/resources/config/configuration.yaml index cc7a3cf295..93bb2de2be 100644 --- a/asdctool/src/main/resources/config/configuration.yaml +++ b/asdctool/src/main/resources/config/configuration.yaml @@ -23,12 +23,13 @@ beSslPort: 8443 version: 1.0 released: 2012-11-30 -toscaConformanceLevel: 8.0 +toscaConformanceLevel: 12.0 minToscaConformanceLevel: 3.0 # These values are necessary for running upgrade migration 1710.0 process enableAutoHealing: false appVersion: 1.1.0 +artifactGeneratorConfig: Artifact-Generator.properties resourcesForUpgrade: 8.0: - org.openecomp.resource.cp.extCP @@ -53,8 +54,6 @@ janusGraphReconnectIntervalInSeconds: 3 # The read timeout towards JanusGraph DB when health check is invoked: janusGraphHealthCheckReadTimeout: 1 -# The interval to try and reconnect to Elasticsearch when it is down during ASDC startup: -esReconnectIntervalInSeconds: 3 uebHealthCheckReconnectIntervalInSeconds: 15 uebHealthCheckReadTimeout: 4 @@ -107,30 +106,6 @@ cassandraConfig: - { name: sdccomponent, replicationStrategy: NetworkTopologyStrategy, replicationInfo: ['DC-AIO-Ubuntu1', '1']} - { name: sdcrepository, replicationStrategy: NetworkTopologyStrategy, replicationInfo: ['DC-AIO-Ubuntu1', '1']} - -#Application-specific settings of ES -elasticSearch: - # Mapping of index prefix to time-based frame. For example, if below is configured: - # - # - indexPrefix: auditingevents - # creationPeriod: minute - # - # then ES object of type which is mapped to "auditingevents-*" template, and created on 2015-12-23 13:24:54, will enter "auditingevents-2015-12-23-13-24" index. - # Another object created on 2015-12-23 13:25:54, will enter "auditingevents-2015-12-23-13-25" index. - # If creationPeriod: month, both of the above will enter "auditingevents-2015-12" index. - # - # PLEASE NOTE: the timestamps are created in UTC/GMT timezone! This is needed so that timestamps will be correctly presented in Kibana. - # - # Legal values for creationPeriod - year, month, day, hour, minute, none (meaning no time-based behaviour). - # - # If no creationPeriod is configured for indexPrefix, default behavour is creationPeriod: month. 
- - indicesTimeFrequency: - - indexPrefix: auditingevents - creationPeriod: month - - indexPrefix: monitoring_events - creationPeriod: month - artifactTypes: - CHEF - PUPPET @@ -176,11 +151,6 @@ resourceTypes: &allResourceTypes # - VF # - VL deploymentResourceArtifacts: - cdsBlueprint: - displayName: "CDS Blueprint" - type: CONTROLLER_BLUEPRINT_ARCHIVE - description: "CDS deployment artifact" - fileExtension: "zip" # heat: # displayName: "Base HEAT Template" # type: HEAT @@ -318,7 +288,10 @@ systemMonitoring: isProxy: false probeIntervalInSeconds: 15 -defaultHeatArtifactTimeoutMinutes: 60 +heatArtifactDeploymentTimeout: + defaultMinutes: 30 + minMinutes: 1 + maxMinutes: 120 serviceDeploymentArtifacts: CONTROLLER_BLUEPRINT_ARCHIVE: @@ -506,7 +479,6 @@ resourceDeploymentArtifacts: ONBOARDED_PACKAGE: acceptedTypes: - csar - - zip validForResourceTypes: - VF - PNF @@ -614,10 +586,6 @@ resourceInformationalArtifacts: resourceInformationalDeployedArtifacts: -requirementsToFulfillBeforeCert: - -capabilitiesToConsumeBeforeCert: - unLoggedUrls: - /sdc2/rest/healthCheck @@ -701,3 +669,59 @@ genericAssetNodeTypes: VF : org.openecomp.resource.abstract.nodes.VF PNF: org.openecomp.resource.abstract.nodes.PNF Service: org.openecomp.resource.abstract.nodes.service + +dmaapConsumerConfiguration: + hosts: olsd004.wnsnet.attws.com:3905 + consumerGroup: asdc + consumerId: mama #mama - in Order To Consume Remove This String And Replace It With -> mama + timeoutMs: 15000 + limit: 1 + pollingInterval: 2 + topic: com.att.sdc.23911-SDCforTestDev-v001 + latitude: 32.109333 + longitude: 34.855499 + version: 1.0 + serviceName: dmaap-v1.dev.dmaap.dt.saat.acsi.att.com/events + environment: TEST + partner: BOT_R + routeOffer: MR1 + protocol: https + contenttype: application/json + dme2TraceOn: true + aftEnvironment: AFTUAT + aftDme2ConnectionTimeoutMs: 15000 + aftDme2RoundtripTimeoutMs: 240000 + aftDme2ReadTimeoutMs: 50000 + dme2preferredRouterFilePath: DME2preferredRouter.txt + timeLimitForNotificationHandleMs: 120000 + credential: + username: m09875@sdc.att.com + password: hmXYcznAljMSisdy8zgcag== + +dmaapProducerConfiguration: + hosts: olsd004.wnsnet.attws.com:3905 + consumerGroup: asdc + consumerId: mama #mama - in Order To Consume Remove This String And Replace It With -> mama + timeoutMs: 15000 + limit: 1 + pollingInterval: 2 + topic: com.att.sdc.23911-SDCforTestDev-v001 + latitude: 32.109333 + longitude: 34.855499 + version: 1.0 + serviceName: dmaap-v1.dev.dmaap.dt.saat.acsi.att.com/events + environment: TEST + partner: BOT_R + routeOffer: MR1 + protocol: https + contenttype: application/json + dme2TraceOn: true + aftEnvironment: AFTUAT + aftDme2ConnectionTimeoutMs: 15000 + aftDme2RoundtripTimeoutMs: 240000 + aftDme2ReadTimeoutMs: 50000 + dme2preferredRouterFilePath: DME2preferredRouter.txt + timeLimitForNotificationHandleMs: 120000 + credential: + username: m09875@sdc.att.com + password: hmXYcznAljMSisdy8zgcag==
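
Note: the configuration.yaml hunk above replaces the single `defaultHeatArtifactTimeoutMinutes: 60` value with a structured `heatArtifactDeploymentTimeout` block (default, minimum, and maximum minutes) and adds DMaaP consumer/producer sections. A minimal sketch of how the new block could be read and applied, assuming PyYAML is available; `clamp_timeout()` is an illustrative helper and not part of SDC:

```python
# Minimal sketch: parse the heatArtifactDeploymentTimeout block added above and
# clamp a requested timeout into the configured [minMinutes, maxMinutes] range.
# Assumes PyYAML; clamp_timeout() is an illustrative helper, not SDC code.
import yaml

SNIPPET = """
heatArtifactDeploymentTimeout:
  defaultMinutes: 30
  minMinutes: 1
  maxMinutes: 120
"""
TIMEOUTS = yaml.safe_load(SNIPPET)["heatArtifactDeploymentTimeout"]

def clamp_timeout(requested_minutes=None):
    """Fall back to defaultMinutes, then clamp into [minMinutes, maxMinutes]."""
    value = TIMEOUTS["defaultMinutes"] if requested_minutes is None else requested_minutes
    return max(TIMEOUTS["minMinutes"], min(TIMEOUTS["maxMinutes"], value))

print(clamp_timeout())     # 30  -> defaultMinutes
print(clamp_timeout(500))  # 120 -> capped at maxMinutes
```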
\ No newline at end of file diff --git a/asdctool/src/main/resources/config/dataTypes.yml b/asdctool/src/main/resources/config/dataTypes.yml index d768bffe78..43c7f0c844 100644 --- a/asdctool/src/main/resources/config/dataTypes.yml +++ b/asdctool/src/main/resources/config/dataTypes.yml @@ -114,12 +114,12 @@ org.openecomp.datatypes.heat.network.AddressPair: type: string description: MAC address required: false - status: supported + status: SUPPORTED ip_address: type: string description: IP address required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.subnet.HostRoute: derived_from: tosca.datatypes.Root description: Host route info for the subnet @@ -128,12 +128,12 @@ org.openecomp.datatypes.heat.network.subnet.HostRoute: type: string description: The destination for static route required: false - status: supported + status: SUPPORTED nexthop: type: string description: The next hop for the destination required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.AllocationPool: derived_from: tosca.datatypes.Root @@ -143,12 +143,12 @@ org.openecomp.datatypes.heat.network.AllocationPool: type: string description: Start address for the allocation pool required: false - status: supported + status: SUPPORTED end: type: string description: End address for the allocation pool required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.neutron.Subnet: derived_from: tosca.datatypes.Root @@ -158,18 +158,18 @@ org.openecomp.datatypes.heat.network.neutron.Subnet: type: string description: The ID of the tenant who owns the network required: false - status: supported + status: SUPPORTED enable_dhcp: type: boolean description: Set to true if DHCP is enabled and false if DHCP is disabled required: false default: true - status: supported + status: SUPPORTED ipv6_address_mode: type: string description: IPv6 address mode required: false - status: supported + status: SUPPORTED constraints: - valid_values: - dhcpv6-stateful @@ -179,7 +179,7 @@ org.openecomp.datatypes.heat.network.neutron.Subnet: type: string description: IPv6 RA (Router Advertisement) mode required: false - status: supported + status: SUPPORTED constraints: - valid_values: - dhcpv6-stateful @@ -191,35 +191,35 @@ org.openecomp.datatypes.heat.network.neutron.Subnet: required: false default: { } - status: supported + status: SUPPORTED entry_schema: type: string allocation_pools: type: list description: The start and end addresses for the allocation pools required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.network.AllocationPool subnetpool: type: string description: The name or ID of the subnet pool required: false - status: supported + status: SUPPORTED dns_nameservers: type: list description: A specified set of DNS name servers to be used required: false default: [ ] - status: supported + status: SUPPORTED entry_schema: type: string host_routes: type: list description: The gateway IP address required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.network.subnet.HostRoute ip_version: @@ -227,7 +227,7 @@ org.openecomp.datatypes.heat.network.neutron.Subnet: description: The gateway IP address required: false default: 4 - status: supported + status: SUPPORTED constraints: - valid_values: - '4' @@ -236,24 +236,24 @@ org.openecomp.datatypes.heat.network.neutron.Subnet: type: string description: The name of the subnet required: false - status: supported 
+ status: SUPPORTED prefixlen: type: integer description: Prefix length for subnet allocation from subnet pool required: false - status: supported + status: SUPPORTED constraints: - greater_or_equal: 0 cidr: type: string description: The CIDR required: false - status: supported + status: SUPPORTED gateway_ip: type: string description: The gateway IP address required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.novaServer.network.PortExtraProperties: derived_from: tosca.datatypes.Root @@ -263,35 +263,35 @@ org.openecomp.datatypes.heat.novaServer.network.PortExtraProperties: type: boolean description: Flag to enable/disable port security on the port required: false - status: supported + status: SUPPORTED mac_address: type: string description: MAC address to give to this port required: false - status: supported + status: SUPPORTED admin_state_up: type: boolean description: The administrative state of this port required: false default: true - status: supported + status: SUPPORTED qos_policy: type: string description: The name or ID of QoS policy to attach to this port required: false - status: supported + status: SUPPORTED allowed_address_pairs: type: list description: Additional MAC/IP address pairs allowed to pass through the port required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.network.AddressPair binding:vnic_type: type: string description: The vnic type to be bound on the neutron port required: false - status: supported + status: SUPPORTED constraints: - valid_values: - macvtap @@ -303,7 +303,7 @@ org.openecomp.datatypes.heat.novaServer.network.PortExtraProperties: required: false default: { } - status: supported + status: SUPPORTED entry_schema: type: string org.openecomp.datatypes.heat.novaServer.network.AddressInfo: @@ -314,7 +314,7 @@ org.openecomp.datatypes.heat.novaServer.network.AddressInfo: type: string description: Port id required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.neutron.port.FixedIps: derived_from: tosca.datatypes.Root description: subnet/ip_address @@ -323,12 +323,12 @@ org.openecomp.datatypes.heat.neutron.port.FixedIps: type: string description: Subnet in which to allocate the IP address for this port required: false - status: supported + status: SUPPORTED ip_address: type: string description: IP address desired in the subnet for this port required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.FileInfo: derived_from: tosca.datatypes.Root description: Heat File Info @@ -337,12 +337,12 @@ org.openecomp.datatypes.heat.FileInfo: type: string description: The required URI string (relative or absolute) which can be used to locate the file required: true - status: supported + status: SUPPORTED file_type: type: string description: The type of the file required: true - status: supported + status: SUPPORTED constraints: - valid_values: - base @@ -357,12 +357,12 @@ org.openecomp.datatypes.heat.contrail.network.rule.PortPairs: type: string description: Start port required: false - status: supported + status: SUPPORTED end_port: type: string description: End port required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrail.network.rule.Rule: derived_from: tosca.datatypes.Root description: policy rule @@ -371,45 +371,45 @@ org.openecomp.datatypes.heat.contrail.network.rule.Rule: type: list description: Source ports required: false - status: supported + status: SUPPORTED entry_schema: type: 
org.openecomp.datatypes.heat.contrail.network.rule.PortPairs protocol: type: string description: Protocol required: false - status: supported + status: SUPPORTED dst_addresses: type: list description: Destination addresses required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrail.network.rule.VirtualNetwork apply_service: type: string description: Service to apply required: false - status: supported + status: SUPPORTED dst_ports: type: list description: Destination ports required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrail.network.rule.PortPairs src_addresses: type: list description: Source addresses required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrail.network.rule.VirtualNetwork direction: type: string description: Direction required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrail.network.rule.RuleList: derived_from: tosca.datatypes.Root description: list of policy rules @@ -418,7 +418,7 @@ org.openecomp.datatypes.heat.contrail.network.rule.RuleList: type: list description: Contrail network rule required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrail.network.rule.Rule org.openecomp.datatypes.heat.contrail.network.rule.VirtualNetwork: @@ -429,7 +429,7 @@ org.openecomp.datatypes.heat.contrail.network.rule.VirtualNetwork: type: string description: Virtual network required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: derived_from: tosca.datatypes.Root @@ -439,12 +439,12 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: type: string description: The remote group ID to be associated with this security group rule required: false - status: supported + status: SUPPORTED protocol: type: string description: The protocol that is matched by the security group rule required: false - status: supported + status: SUPPORTED constraints: - valid_values: - tcp @@ -455,7 +455,7 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: description: Ethertype of the traffic required: false default: IPv4 - status: supported + status: SUPPORTED constraints: - valid_values: - IPv4 @@ -465,7 +465,7 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: description: 'The maximum port number in the range that is matched by the security group rule. ' required: false - status: supported + status: SUPPORTED constraints: - in_range: - 0 @@ -474,13 +474,13 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: type: string description: The remote IP prefix (CIDR) to be associated with this security group rule required: false - status: supported + status: SUPPORTED remote_mode: type: string description: Whether to specify a remote group or a remote IP prefix required: false default: remote_ip_prefix - status: supported + status: SUPPORTED constraints: - valid_values: - remote_ip_prefix @@ -490,7 +490,7 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: description: The direction in which the security group rule is applied required: false default: ingress - status: supported + status: SUPPORTED constraints: - valid_values: - egress @@ -499,7 +499,7 @@ org.openecomp.datatypes.heat.network.neutron.SecurityRules.Rule: type: integer description: The minimum port number in the range that is matched by the security group rule. 
required: false - status: supported + status: SUPPORTED constraints: - in_range: - 0 @@ -512,13 +512,13 @@ org.openecomp.datatypes.heat.substitution.SubstitutionFiltering: type: string description: Substitute Service Template required: true - status: supported + status: SUPPORTED index_value: type: integer description: Index value of the substitution service template runtime instance required: false default: 0 - status: supported + status: SUPPORTED constraints: - greater_or_equal: 0 count: @@ -526,19 +526,19 @@ org.openecomp.datatypes.heat.substitution.SubstitutionFiltering: description: Count required: false default: 1 - status: supported + status: SUPPORTED scaling_enabled: type: boolean description: Indicates whether service scaling is enabled required: false default: true - status: supported + status: SUPPORTED mandatory: type: boolean description: Mandatory required: false default: true - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.RefDataSequence: derived_from: tosca.datatypes.Root description: network policy refs data sequence @@ -547,12 +547,12 @@ org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.RefDataSequence: type: integer description: Network Policy ref data sequence Major required: false - status: supported + status: SUPPORTED network_policy_refs_data_sequence_minor: type: integer description: Network Policy ref data sequence Minor required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.RefData: derived_from: tosca.datatypes.Root description: network policy refs data @@ -561,7 +561,7 @@ org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.RefData: type: org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.RefDataSequence description: Network Policy ref data sequence required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnet: derived_from: tosca.datatypes.Root description: Network Ipam Ref Data Subnet @@ -570,12 +570,12 @@ org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnet type: string description: Network ipam refs data ipam subnets ip prefix len required: false - status: supported + status: SUPPORTED network_ipam_refs_data_ipam_subnets_subnet_ip_prefix: type: string description: Network ipam refs data ipam subnets ip prefix required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnetList: derived_from: tosca.datatypes.Root description: Network Ipam Ref Data Subnet List @@ -584,12 +584,12 @@ org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnet type: org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnet description: Network ipam refs data ipam subnets required: false - status: supported + status: SUPPORTED network_ipam_refs_data_ipam_subnets_addr_from_start: type: string description: Network ipam refs data ipam subnets addr from start required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.IpamRefData: derived_from: tosca.datatypes.Root description: Network Ipam Ref Data @@ -598,7 +598,7 @@ org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.IpamRefData: type: list description: Network ipam refs data ipam subnets required: false - status: supported + status: SUPPORTED entry_schema: type: 
org.openecomp.datatypes.heat.contrailV2.virtual.network.rule.ref.data.IpamSubnetList org.openecomp.datatypes.heat.contrailV2.network.rule.SrcVirtualNetwork: @@ -609,7 +609,7 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.SrcVirtualNetwork: type: string description: Source addresses Virtual network required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.network.rule.DstVirtualNetwork: derived_from: tosca.datatypes.Root description: destination addresses @@ -618,7 +618,7 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.DstVirtualNetwork: type: string description: Destination addresses Virtual network required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.network.rule.DstPortPairs: derived_from: tosca.datatypes.Root description: destination port pairs @@ -627,12 +627,12 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.DstPortPairs: type: string description: Start port required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_dst_ports_end_port: type: string description: End port required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.network.rule.SrcPortPairs: derived_from: tosca.datatypes.Root description: source port pairs @@ -641,12 +641,12 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.SrcPortPairs: type: string description: Start port required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_src_ports_end_port: type: string description: End port required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.network.rule.ActionList: derived_from: tosca.datatypes.Root description: Action List @@ -655,12 +655,12 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.ActionList: type: string description: Simple Action required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_action_list_apply_service: type: list description: Apply Service required: false - status: supported + status: SUPPORTED entry_schema: type: string org.openecomp.datatypes.heat.contrailV2.network.rule.ActionList: @@ -671,12 +671,12 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.ActionList: type: string description: Simple Action required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_action_list_apply_service: type: list description: Apply Service required: false - status: supported + status: SUPPORTED entry_schema: type: string org.openecomp.datatypes.heat.contrailV2.network.rule.Rule: @@ -687,45 +687,45 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.Rule: type: list description: Destination addresses required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.network.rule.DstVirtualNetwork network_policy_entries_policy_rule_dst_ports: type: list description: Destination ports required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.network.rule.DstPortPairs network_policy_entries_policy_rule_protocol: type: string description: Protocol required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_src_addresses: type: list description: Source addresses required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.network.rule.SrcVirtualNetwork network_policy_entries_policy_rule_direction: 
type: string description: Direction required: false - status: supported + status: SUPPORTED network_policy_entries_policy_rule_src_ports: type: list description: Source ports required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.network.rule.SrcPortPairs network_policy_entries_policy_rule_action_list: type: org.openecomp.datatypes.heat.contrailV2.network.rule.ActionList description: Action list required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.network.rule.RuleList: derived_from: tosca.datatypes.Root description: list of policy rules @@ -734,7 +734,7 @@ org.openecomp.datatypes.heat.contrailV2.network.rule.RuleList: type: list description: Contrail network rule required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.network.rule.Rule org.openecomp.datatypes.heat.network.contrail.port.StaticRoute: @@ -745,17 +745,17 @@ org.openecomp.datatypes.heat.network.contrail.port.StaticRoute: type: string description: Route prefix required: false - status: supported + status: SUPPORTED next_hop: type: string description: Next hop required: false - status: supported + status: SUPPORTED next_hop_type: type: string description: Next hop type required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.contrail.AddressPair: derived_from: tosca.datatypes.Root description: Address Pair @@ -764,7 +764,7 @@ org.openecomp.datatypes.heat.network.contrail.AddressPair: type: string description: Address mode active-active or active-standy required: false - status: supported + status: SUPPORTED constraints: - valid_values: - active-active @@ -773,12 +773,12 @@ org.openecomp.datatypes.heat.network.contrail.AddressPair: type: string description: IP address prefix required: false - status: supported + status: SUPPORTED mac_address: type: string description: Mac address required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.network.contrail.InterfaceData: derived_from: tosca.datatypes.Root description: Interface Data @@ -787,26 +787,26 @@ org.openecomp.datatypes.heat.network.contrail.InterfaceData: type: list description: An ordered list of static routes to be added to this interface required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.network.contrail.port.StaticRoute virtual_network: type: string description: Virtual Network for this interface required: true - status: supported + status: SUPPORTED allowed_address_pairs: type: list description: List of allowed address pair for this interface required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.network.contrail.AddressPair ip_address: type: string description: IP for this interface required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.machine.interface.Properties: derived_from: tosca.datatypes.Root description: Virtual Machine Interface Properties. @@ -815,7 +815,7 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.interface.Properties: type: string description: Service Interface Type. required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.Root: derived_from: tosca.datatypes.Root description: > @@ -1061,12 +1061,12 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPair type: string description: IP Prefix. 
required: false - status: supported + status: SUPPORTED ip_prefix_len: type: integer description: IP Prefix Len. required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.MacAddress: derived_from: tosca.datatypes.Root @@ -1076,7 +1076,7 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.MacAddress: type: list description: Mac Addresses List. required: false - status: supported + status: SUPPORTED entry_schema: type: string @@ -1088,7 +1088,7 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.Properties: type: string description: Sub Interface VLAN Tag. required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPair: derived_from: tosca.datatypes.Root @@ -1098,17 +1098,17 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPair type: string description: Address Mode. required: false - status: supported + status: SUPPORTED ip: type: org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPairIp description: IP. required: false - status: supported + status: SUPPORTED mac: type: string description: Mac. required: false - status: supported + status: SUPPORTED org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPairs: derived_from: tosca.datatypes.Root @@ -1118,6 +1118,6 @@ org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPair type: list description: Addresses pair List. required: false - status: supported + status: SUPPORTED entry_schema: type: org.openecomp.datatypes.heat.contrailV2.virtual.machine.subInterface.AddressPair
\ No newline at end of file diff --git a/asdctool/src/main/resources/config/elasticsearch.yml b/asdctool/src/main/resources/config/elasticsearch.yml deleted file mode 100644 index 38482e2b02..0000000000 --- a/asdctool/src/main/resources/config/elasticsearch.yml +++ /dev/null @@ -1,393 +0,0 @@ - -elasticSearch.local: true -elasticSearch.transportclient: false -cluster.name: elasticsearch - -discovery.zen.ping.multicast.enabled: false -discovery.zen.ping.unicast.enabled: true -discovery.zen.ping.unicast.hosts: elasticsearch_host -transport.client.initial_nodes: - - elasticsearch_host:9300 - -http.cors.enabled: true - -#plugin.types: "DeleteByQueryPlugin" - -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at <http://elasticsearch.org/guide>. -# -# The installation procedure is covered at -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>. -# -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. -# -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: -# -# node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html> - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. -# -# cluster.name: elasticsearch - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: -# -# node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. -# -# Allow this node to be eligible as a master node (enabled by default): -# -# node.master: true -# -# Allow this node to store data (enabled by default): -# -# node.data: true - -# You can exploit these settings to design advanced cluster topologies. -# -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. -# -# node.master: false -# node.data: true -# -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. -# -# node.master: true -# node.data: false -# -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) 
-# -# node.master: false -# node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as <http://www.elasticsearch.org/overview/marvel/>, -# <http://github.com/karmi/elasticsearch-paramedic>, -# <http://github.com/lukas-vlcek/bigdesk> and -# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -# node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -# node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html> -# for more information. - -# Set the number of shards (splits) of an index (5 by default): -# -# index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -# index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -index.number_of_shards: 1 -index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect -# the index status. - - -#################################### Paths #################################### -path.home: /src/test/resources -# Path to directory containing configuration (this file and logging.yml): -# -path.conf: /src/test/resources - -# Path to directory where to store index data allocated for this node. -# -path.data: target/esdata -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. 
For example: -# -# path.data: /path/to/data1,/path/to/data2 - -# Path to temporary files: -# -path.work: /target/eswork - -# Path to log files: -# -path.logs: /target/eslogs - -# Path to where plugins are installed: -# -# path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. -# -# plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. -# -# Set this property to true to lock the memory: -# -# bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. -# -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): -# -# network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. -# -# network.publish_host: 192.168.0.1 - -# Set both 'bind_host' and 'publish_host': -# -# network.host: 192.168.0.1 - -# Set a custom port for the node to node communication (9300 by default): -# -# transport.tcp.port: 9300 - -# Enable compression for all communication between nodes (disabled by default): -# -# transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: -# -# http.port: 9200 - -# Set a custom allowed content length: -# -# http.max_content_length: 100mb - -# Disable HTTP completely: -# -# http.enabled: false - - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# There are several types of gateway implementations. For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>. - -# The default gateway type is the "local" gateway (recommended): -# -# gateway.type: local - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: -# -gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): -# -# gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. 
Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): -# -gateway.expected_nodes: 1 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: -# -# 1. During the initial recovery -# -# cluster.routing.allocation.node_initial_primaries_recoveries: 4 -# -# 2. During adding/removing nodes, rebalancing, etc -# -# cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): -# -# indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: -# -# indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. Its recommended to set it to a higher value -# than 1 when running more than 2 nodes in the cluster. -# -# discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: -# -# discovery.zen.ping.timeout: 3s - -# For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html> - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. -# -# 1. Disable multicast discovery (enabled by default): -# -# discovery.zen.ping.multicast.enabled: false -# -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: -# -# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. -# -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html> -# -# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/> -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>. - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>. - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. 
- -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s - diff --git a/asdctool/src/main/resources/config/groupTypes.yml b/asdctool/src/main/resources/config/groupTypes.yml index 0c0abc9013..ce457e4add 100644 --- a/asdctool/src/main/resources/config/groupTypes.yml +++ b/asdctool/src/main/resources/config/groupTypes.yml @@ -6,12 +6,12 @@ org.openecomp.groups.heat.HeatStack: type: string description: Heat file which associate to this group/heat stack required: true - status: supported + status: SUPPORTED description: type: string description: group description required: true - status: supported + status: SUPPORTED org.openecomp.groups.VfModule: derived_from: tosca.groups.Root description: Grouped all heat resources which are in the same VF Module @@ -21,7 +21,7 @@ org.openecomp.groups.VfModule: description: Whether this module should be deployed before other modules required: true default: false - status: supported + status: SUPPORTED vf_module_label: type: string required: true diff --git a/asdctool/src/main/resources/config/janusgraph.properties b/asdctool/src/main/resources/config/janusgraph.properties index 5f22a08837..3e88b0d3c8 100644 --- a/asdctool/src/main/resources/config/janusgraph.properties +++ b/asdctool/src/main/resources/config/janusgraph.properties @@ -7,14 +7,14 @@ storage.connection-timeout=10000 storage.cassandra.keyspace=sdctitan storage.cassandra.ssl.enabled=true -storage.cassandra.ssl.truststore.location=C:\\gitWork\\vagrant-sdc-all-in-one\\mytmp.trust +storage.cassandra.ssl.truststore.location=/var/lib/jetty/etc/truststore storage.cassandra.ssl.truststore.password=Aa123456 storage.cassandra.read-consistency-level=LOCAL_QUORUM storage.cassandra.write-consistency-level=LOCAL_QUORUM storage.cassandra.replication-strategy-class=org.apache.cassandra.locator.NetworkTopologyStrategy -storage.cassandra.replication-strategy-options=DC-sdc-iltlv633,1 -storage.cassandra.astyanax.local-datacenter=DC-sdc-iltlv633 +storage.cassandra.replication-strategy-options=DC-ILTLV2083,1 +storage.cassandra.astyanax.local-datacenter=DC-ILTLV2083 cache.db-cache = false diff --git a/asdctool/src/main/resources/config/tmp.trust b/asdctool/src/main/resources/config/tmp.trust Binary files differnew file mode 100644 index 0000000000..f74b8f53cc --- /dev/null +++ b/asdctool/src/main/resources/config/tmp.trust diff --git a/asdctool/src/main/resources/elasticsearch.yml b/asdctool/src/main/resources/elasticsearch.yml deleted file mode 100644 index 71ccdbb8f5..0000000000 --- a/asdctool/src/main/resources/elasticsearch.yml +++ /dev/null @@ -1,399 +0,0 @@ - -cluster.name: elasticsearch - -discovery.zen.ping.multicast.enabled: false -discovery.zen.ping.unicast.enabled: true 
-discovery.zen.ping.unicast.hosts: elasticsearch_host - -http.cors.enabled: true - -path.home: "/home/vagrant/catalog-be/config" - -elasticSearch.transportclient: true - -transport.client.initial_nodes: - - elasticsearch_host:9300 - -#shield.user: asdc:Aa12345 -#shield.ssl.keystore.path: "/vagrant/install/resources/catalog-be/keystore/es-client.jks" -#shield.ssl.keystore.password: Aa123456 -#shield.transport.ssl: true - -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at <http://elasticsearch.org/guide>. -# -# The installation procedure is covered at -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>. -# -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. -# -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: -# -# node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html> - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. -# -# cluster.name: elasticsearch - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: -# -# node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. -# -# Allow this node to be eligible as a master node (enabled by default): -# -# node.master: true -# -# Allow this node to store data (enabled by default): -# -# node.data: true - -# You can exploit these settings to design advanced cluster topologies. -# -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. -# -# node.master: false -# node.data: true -# -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. -# -# node.master: true -# node.data: false -# -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) -# -# node.master: false -# node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as <http://www.elasticsearch.org/overview/marvel/>, -# <http://github.com/karmi/elasticsearch-paramedic>, -# <http://github.com/lukas-vlcek/bigdesk> and -# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state. 
- -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -# node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -# node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html> -# for more information. - -# Set the number of shards (splits) of an index (5 by default): -# -# index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -# index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -index.number_of_shards: 1 -index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): -# -path.conf: /src/test/resources - -# Path to directory where to store index data allocated for this node. -# -path.data: target/esdata -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. For example: -# -# path.data: /path/to/data1,/path/to/data2 - -# Path to temporary files: -# -path.work: /target/eswork - -# Path to log files: -# -path.logs: /target/eslogs - -# Path to where plugins are installed: -# -# path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. -# -# plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. 
-# -# Set this property to true to lock the memory: -# -# bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. -# -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): -# -# network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. -# -# network.publish_host: 192.168.0.1 - -# Set both 'bind_host' and 'publish_host': -# -# network.host: 192.168.0.1 - -# Set a custom port for the node to node communication (9300 by default): -# -# transport.tcp.port: 9300 - -# Enable compression for all communication between nodes (disabled by default): -# -# transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: -# -# http.port: 9200 - -# Set a custom allowed content length: -# -# http.max_content_length: 100mb - -# Disable HTTP completely: -# -# http.enabled: false - - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# There are several types of gateway implementations. For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>. - -# The default gateway type is the "local" gateway (recommended): -# -# gateway.type: local - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: -# -gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): -# -# gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): -# -gateway.expected_nodes: 1 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: -# -# 1. During the initial recovery -# -# cluster.routing.allocation.node_initial_primaries_recoveries: 4 -# -# 2. During adding/removing nodes, rebalancing, etc -# -# cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 
100mb, by default 20mb): -# -# indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: -# -# indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. Its recommended to set it to a higher value -# than 1 when running more than 2 nodes in the cluster. -# -# discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: -# -# discovery.zen.ping.timeout: 3s - -# For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html> - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. -# -# 1. Disable multicast discovery (enabled by default): -# -# discovery.zen.ping.multicast.enabled: false -# -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: -# -# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. -# -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html> -# -# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/> -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>. - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>. - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. 
- -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s - diff --git a/asdctool/src/main/resources/es-resources/README.txt b/asdctool/src/main/resources/es-resources/README.txt deleted file mode 100644 index a7006efa80..0000000000 --- a/asdctool/src/main/resources/es-resources/README.txt +++ /dev/null @@ -1,43 +0,0 @@ -ASDC elasticsearch tool -======================== - -This tool purpose is to ease and allow updating elasticsearch indices. - -In order to use the scripts, you need to verify Python is installed and to install the elasticsearc-py library: - Verify pip is installed: $command -v pip - if not installed: - Download https://bootstrap.pypa.io/get-pip.py - $python get-pip.py (see instruction: https://pip.pypa.io/en/latest/installing/#installing-with-get-pip-py) - $pip install elasticsearch - - -Tool contains: - - index_ops.py - This script includes operations on elasticsearch index: - - create index: - $python index_ops.py -o create -a <elasticsearch hostname> -n <indexName> -f <index mapping file> - - delete index: - $python index_ops.py -o delete -a <elasticsearch hostname> -n <indexName> - - copy index (assumes destination index already exists): - $python index_ops.py -o move -a <elasticsearch hostname> -n <indexName> -t <toIndex> - - - - file_utils.py - This script includes operations on files - - - audit_migration_1602.py - This script run full flow to migrate audit information from previous versions to ASDC 1602 - It has 2 inputs: - 1. config_properties.py - this file holds configuration (hostname, index name, index mapping file etc.) - 2. folder of fields mapping per elasticsearch type (map old field to new field) - The flow of this script is as follow: - * create temp index with correct index mapping - * scan the audit index to get all records - * manipulate fields data and insert it to temp index - * delete audit index - * create audit index with correct mapping - * copy from temp index to newly created audit index - * delete temp index
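Note: the copy step described in the README above (scan the source index, re-target each record, bulk-index it into the destination) boils down to a few elasticsearch-py calls. The sketch below is illustrative only; it assumes the elasticsearch library installed via pip as the README instructs, and the host and index names are placeholders.

    from elasticsearch import Elasticsearch, helpers

    def copy_all_docs(host, from_index, to_index):
        client = Elasticsearch([host])
        client.indices.refresh(index=from_index)
        # Stream every document out of the source index ...
        hits = helpers.scan(client, query={"query": {"match_all": {}}}, index=from_index)
        actions = []
        for hit in hits:
            hit['_index'] = to_index   # re-target each hit at the destination index
            hit.pop('_id', None)       # let Elasticsearch assign fresh ids, as the migration script does
            actions.append(hit)
        # ... and bulk-index everything into the destination.
        return helpers.bulk(client, actions)

    # e.g. copy_all_docs('127.0.0.1', 'auditingevents-2015-12', 'temp_audit')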
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/auditMappings.txt b/asdctool/src/main/resources/es-resources/auditMappings.txt deleted file mode 100644 index 7de77cccbd..0000000000 --- a/asdctool/src/main/resources/es-resources/auditMappings.txt +++ /dev/null @@ -1,169 +0,0 @@ -{ "settings": {}, "mappings": -{ -"distributiondownloadevent": -{ "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "RESOURCE_URL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CONSUMER_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }}, - "_all": { "enabled": true } }, - "auditinggetuebclusterevent": -{ "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CONSUMER_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }}, - "_all": { "enabled": true } }, - "distributionstatusevent": -{ "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "RESOURCE_URL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TOPIC_NAME":{ "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CONSUMER_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }}, - "_all": { "enabled": true } }, -"distributionengineevent": -{ "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TOPIC_NAME":{ "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ROLE": { 
"include_in_all": true, "type": "string" }, - "API_KEY": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "D_ENV": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CONSUMER_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }}, - "_all": { "enabled": true } }, - "useraccessevent": { - "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_NAME": { "include_in_all": true, "type": "string" }} , - "_all": { "enabled": true }}, - "resourceadminevent": - { "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CURR_VERSION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CURR_STATE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_NAME": { "include_in_all": true, "type": "string" }, - "PREV_VERSION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "PREV_STATE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "RESOURCE_NAME": { "include_in_all": true, "type": "string" }, - "RESOURCE_TYPE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DPREV_STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DCURR_STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "COMMENT": { "include_in_all": true, "type": "string" }, - "ARTIFACT_NAME": { "include_in_all": true, "index": "not_analyzed", "type": "string" } }, - "_all": { "enabled": true }} , - "useradminevent": - { "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_NAME": { "include_in_all": true, "type": "string" }, - "USER_EMAIL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_ROLE": 
{ "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_AFTER_EMAIL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_BEFORE_ROLE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_AFTER_ROLE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_NAME": { "include_in_all": true, "type": "string" }, - "USER_BEFORE_NAME": { "include_in_all": true, "type": "string" }, - "USER_BEFORE_EMAIL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "USER_AFTER_NAME": { "include_in_all": true, "type": "string" } }, - "_all": { "enabled": true } }, -"distributionnotificationevent": - {"properties":{ - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CURR_STATE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CURR_VERSION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_NAME": { "include_in_all": true, "type": "string" }, - "MODIFIER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "RESOURCE_NAME": { "include_in_all": true, "type": "string" }, - "RESOURCE_TYPE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TOPIC_NAME":{ "include_in_all": true, "index": "not_analyzed", "type": "string" }}}, -"categoryevent": -{"properties":{ - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "MODIFIER": { "include_in_all": true, "type": "string" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CATEGORY_NAME": { "include_in_all": true, "type": "string" }, - "SUB_CATEGORY_NAME": { "include_in_all": true, "type": "string" }, - "GROUPING_NAME": { "include_in_all": true, "type": "string" }, - "RESOURCE_TYPE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }}, - "_all": { "enabled": true } }, - "authevent": { - "properties": { - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }, - "DESC": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "URL": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - 
"USER": { "include_in_all": true, "type": "string" } , - "AUTH_STATUS": { "include_in_all": true, "index": "not_analyzed","type": "string" } , - "REALM": { "include_in_all": true, "index": "not_analyzed","type": "string" }} , - "_all": { "enabled": true }}, - "consumerevent": - {"properties":{ - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER": { "include_in_all": true, "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "ECOMP_USER": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }}, - "_all": { "enabled": true } }, - "distributiondeployevent": - { "properties": { - "ACTION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "CURR_VERSION": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "DESC": { "include_in_all": true, "type": "string" }, - "DID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "MODIFIER_NAME": { "include_in_all": true, "type": "string" }, - "MODIFIER_UID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "REQUEST_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "RESOURCE_NAME": { "include_in_all": true, "type": "string" }, - "RESOURCE_TYPE": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "SERVICE_INSTANCE_ID": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "STATUS": { "include_in_all": true, "index": "not_analyzed", "type": "string" }, - "TIMESTAMP": { "include_in_all": true, "ignore_malformed": false, "format": "yyyy-MM-dd HH:mm:ss.SSS", "precision_step": 4, "type": "date" }}}}}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/audit_migration_1602.py b/asdctool/src/main/resources/es-resources/audit_migration_1602.py deleted file mode 100644 index 8b61ebfaf0..0000000000 --- a/asdctool/src/main/resources/es-resources/audit_migration_1602.py +++ /dev/null @@ -1,132 +0,0 @@ -import itertools -import string -import json -from datetime import datetime -from elasticsearch import Elasticsearch -import elasticsearch -import elasticsearch.helpers -from elasticsearch.client import IndicesClient -import sys, os -from index_ops import createIndex, deleteIndex, copyIndex -from config_properties import getGlobalVar -from file_utils import readFileToJson - -def updateFieldNames(client, queryFrom, fromIndex, destIndex, addUTC): - typesDir="types" - typeFields = {} - for filename in os.listdir(typesDir): - print filename - fieldNames=readFileToJson(typesDir+os.sep+filename) - - type=filename.split(".")[0] - typeFields[type] = fieldNames - - client.indices.refresh(index=fromIndex) - res = elasticsearch.helpers.scan(client, query=queryFrom, index=fromIndex) - - actions = [] - for i in res: - res_type = i['_type'] - fieldNames = typeFields.get(res_type) - if (fieldNames != None): - action={} - for field in i['_source']: - updatedName=fieldNames.get(field) - if (updatedName != None): - if (field == 'timestamp' and addUTC == True): - value+=" UTC" - value=i['_source'].get(field) - action[updatedName]=value - else: - action[field]=i['_source'].get(field) - i['_source']=action - - i['_index']=destIndex - i.pop('_id', None) - actions.append(i) - - bulk_res = elasticsearch.helpers.bulk(client, actions) - print "bulk response: ", bulk_res - - - -def updateAllrecordsWithUTC(client, queryFrom, fromIndex, destIndex): - - #scan indices - client.indices.refresh(index=fromIndex) - res = elasticsearch.helpers.scan(client, query=queryFrom, index=fromIndex) - - actions = [] - for i in res: - print i - i['_index']=destIndex - i['_source']['TIMESTAMP']+=" UTC" - actions.append(i) - - bulk_res = elasticsearch.helpers.bulk(client, actions) - print "bulk response: ", bulk_res - - -def printQueryResults(client, myQuery, indexName): - client.indices.refresh(index=indexName) - res = elasticsearch.helpers.scan(client, query=myQuery, index=indexName) - for i in res: - print i - -def main(): - print "start script for changing fields" - print "=================================" - - # initialize es - es = Elasticsearch([getGlobalVar('host')]) - - try: - mapping=readFileToJson(getGlobalVar('mappingFileName')) - res = createIndex(es, getGlobalVar('tempIndexName'), mapping) - if (res != 0): - print "script results in error" - sys.exit(1) - - print "scan audit index and manipulate data" - print "====================================" - - print "start time: ", datetime.now().time() - updateFieldNames(es, getGlobalVar('matchAllQuery'), getGlobalVar('origIndexName'), getGlobalVar('tempIndexName'), getGlobalVar('addUTC')) - - print "re-create original index" - print "=========================" - res = createIndex(es, getGlobalVar('origIndexName'), mapping) - if (res != 0): - print "script results in error" - sys.exit(1) - - print "copy data from temp index to original" - print "=======================================" - res = copyIndex(es, getGlobalVar('tempIndexName'), getGlobalVar('origIndexName')) - if (res != 0): - print "script results in error" - sys.exit(1) - - print "delete temp index" - print "==================" - res = deleteIndex(es, getGlobalVar('tempIndexName')) - if (res != 
0): - print "script results in error" - sys.exit(1) - - - print "end time: ", datetime.now().time() - - except Exception, error: - print "An exception was thrown!" - print str(error) - return 2 - - -if __name__ == "__main__": - main() - - - - - diff --git a/asdctool/src/main/resources/es-resources/config_properties.py b/asdctool/src/main/resources/es-resources/config_properties.py deleted file mode 100644 index d0973001dc..0000000000 --- a/asdctool/src/main/resources/es-resources/config_properties.py +++ /dev/null @@ -1,11 +0,0 @@ -globalVars={ - "host": "127.0.0.1", - "origIndexName": "temp_audit", - "tempIndexName": "temp_audit2", - "addUTC": False, - "mappingFileName": "auditMappings.txt", - "matchAllQuery":{"query": {"match_all": {}}} -} - -def getGlobalVar(propertyName): - return globalVars.get(propertyName)
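Note: the migration script resolves every setting through getGlobalVar, so a run is tuned by editing the globalVars dictionary above. A short usage illustration:

    from config_properties import getGlobalVar

    # Every setting of the migration is resolved through this helper:
    host = getGlobalVar('host')                  # '127.0.0.1'
    temp_index = getGlobalVar('tempIndexName')   # 'temp_audit2'
    query = getGlobalVar('matchAllQuery')        # {'query': {'match_all': {}}}

    # Unknown keys return None, because the helper delegates to dict.get:
    assert getGlobalVar('noSuchKey') is None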
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/file_utils.py b/asdctool/src/main/resources/es-resources/file_utils.py deleted file mode 100644 index 743902084e..0000000000 --- a/asdctool/src/main/resources/es-resources/file_utils.py +++ /dev/null @@ -1,21 +0,0 @@ -import itertools -import string -import json -from datetime import datetime -from elasticsearch import Elasticsearch -import elasticsearch -import elasticsearch.helpers -from elasticsearch.client import IndicesClient -import sys, os - -def readFileToJson(fileName): - print "read file ", fileName - fo=open(fileName) - try: - json_mapping=json.load(fo) - fo.close() - except ValueError: - print "error in reading file " , fileName - fo.close() - raise - return json_mapping diff --git a/asdctool/src/main/resources/es-resources/index_ops.py b/asdctool/src/main/resources/es-resources/index_ops.py deleted file mode 100644 index d1f3bb0021..0000000000 --- a/asdctool/src/main/resources/es-resources/index_ops.py +++ /dev/null @@ -1,151 +0,0 @@ -import itertools -import string -import json -from datetime import datetime -from elasticsearch import Elasticsearch -import elasticsearch -import elasticsearch.helpers -from elasticsearch.client import IndicesClient, CatClient -import sys, os, getopt -from file_utils import readFileToJson -from config_properties import getGlobalVar - - - -def createIndex(client, indexName, createBody): - try: - print "start createIndex" - if (client == None): - client = Elasticsearch(['localhost']) - esIndexClient = IndicesClient(client) - res = deleteIndex(client, indexName) - if (res != 0): - print "operation failed" - return 2 - create_res=elasticsearch.client.IndicesClient.create(esIndexClient, index=indexName, body=createBody) - print "create index response: ", create_res - if (create_res['acknowledged'] != True): - print "failed to create index" - return 1 - else: - print "index ",indexName, " created successfully" - return 0 - except Exception, error: - print "An exception was thrown!" - print str(error) - return 2 - - -def deleteIndex(client, indexName): - try: - print "start deleteIndex" - if (client == None): - client = Elasticsearch(['localhost']) - esIndexClient = IndicesClient(client) - isExists=elasticsearch.client.IndicesClient.exists(esIndexClient, indexName) - if ( isExists == True ): - delete_res=elasticsearch.client.IndicesClient.delete(esIndexClient, index=indexName) - if (delete_res['acknowledged'] != True): - print "failed to delete index" - return 1 - else: - print "index ",indexName, " deleted" - return 0 - else: - print "index not found - assume already deleted" - return 0 - except Exception, error: - print "An exception was thrown!" - print str(error) - return 2 - -def copyIndex(client, fromIndex, toIndex): - try: - print "start copyIndex" - if (client == None): - client = Elasticsearch(['localhost']) - client.indices.refresh(index=fromIndex) - count=client.search(fromIndex, search_type='count') - print "original index count: ",count - docNum, docErrors = elasticsearch.helpers.reindex(client, fromIndex, toIndex) - print "copy result: ", docNum, docErrors - if (docNum != count['hits']['total']): - print "Failed to copy all documents. expected: ", count['hits']['total'], " actual: ", docNum - return 1 - # if (len(docErrors) != 0): - # print "copy returned with errors" - # print docErrors - # return 1 - return 0 - except Exception, error: - print "An exception was thrown!" 
- print str(error) - return 2 - - -def usage(): - print 'USAGE: ', sys.argv[0], '-o <operation : create | delete | move> -n <indexName> -a <address> -f <mappingFile (for create)> -t <toIndex (for move operation)>' - - - -def main(argv): - print "start script with ", len(sys.argv), 'arguments.' - print "==============================================" - - try: - opts, args = getopt.getopt(argv, "h:o:a:n:f:t:", ["operation","address","indexName","file","toIndex"]) - except getopt.GetoptError: - usage() - sys.exit(2) - - host = None - for opt, arg in opts: - print opt, arg - if opt == '-h': - usage() - sys.exit(2) - elif opt in ('-f', '--file'): - mapping=readFileToJson(arg) - elif opt in ('-a', '--address'): - host=arg - elif opt in ('-o', '--operation'): - operation=arg - elif opt in ('-n', '--indexName'): - indexName=arg - elif opt in ('-t', '--toIndex'): - destIndexName=arg - - if (operation == None): - usage() - sys.exit(2) - elif (host == None): - print "address is mandatory argument" - usage() - sys.exit(2) - elif operation == 'create': - print "create new index ", indexName - client = Elasticsearch([{'host': host, 'timeout':5}] ) - res = createIndex(client, indexName, mapping) - - elif operation == 'delete': - print "delete index ", indexName - client = Elasticsearch([{'host': host, 'timeout':5}] ) - res = deleteIndex(client, indexName) - - elif operation == 'move': - print "move index ", indexName, " to ", destIndexName - client = Elasticsearch([{'host': host, 'timeout':5}] ) - res = copyIndex(client, indexName, destIndexName) - else: - usage() - exit(2) - if res != 0: - print "ERROR: operation Failed" - exit(1) - - - -if __name__ == "__main__": - main(sys.argv[1:]) - - diff --git a/asdctool/src/main/resources/es-resources/types/auditinggetuebclusterevent.txt b/asdctool/src/main/resources/es-resources/types/auditinggetuebclusterevent.txt deleted file mode 100644 index b7e9435f97..0000000000 --- a/asdctool/src/main/resources/es-resources/types/auditinggetuebclusterevent.txt +++ /dev/null @@ -1,8 +0,0 @@ -{ "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "consumerId": "CONSUMER_ID" -}
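Note: the create, delete and copy operations in index_ops.py can also be driven without the CLI wrapper in main. A minimal sketch of a programmatic round trip is shown below, assuming the (now removed) index_ops and file_utils modules are still on the Python path and the cluster is local; index names are placeholders.

    from elasticsearch import Elasticsearch
    from file_utils import readFileToJson
    from index_ops import createIndex, copyIndex, deleteIndex

    client = Elasticsearch([{'host': '127.0.0.1', 'timeout': 5}])
    mapping = readFileToJson('auditMappings.txt')

    # Same sequence the CLI performs for create / move / delete;
    # each call returns 0 on success and non-zero on failure.
    if createIndex(client, 'temp_audit2', mapping) != 0:
        raise SystemExit('create failed')
    if copyIndex(client, 'temp_audit', 'temp_audit2') != 0:
        raise SystemExit('copy failed')
    deleteIndex(client, 'temp_audit')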
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/distributiondeployevent.txt b/asdctool/src/main/resources/es-resources/types/distributiondeployevent.txt deleted file mode 100644 index a74f0370e6..0000000000 --- a/asdctool/src/main/resources/es-resources/types/distributiondeployevent.txt +++ /dev/null @@ -1,14 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "currVersion": "CURR_VERSION", - "distributionId": "DID", - "modifierName": "MODIFIER_NAME", - "modifierUid": "MODIFIER_UID", - "resourceName": "RESOURCE_NAME", - "resourceType": "RESOURCE_TYPE" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/distributiondownloadevent.txt b/asdctool/src/main/resources/es-resources/types/distributiondownloadevent.txt deleted file mode 100644 index 879c4c4231..0000000000 --- a/asdctool/src/main/resources/es-resources/types/distributiondownloadevent.txt +++ /dev/null @@ -1,9 +0,0 @@ -{ "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "resourceUrl": "RESOURCE_URL", - "consumerId": "CONSUMER_ID" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/distributionengineevent.txt b/asdctool/src/main/resources/es-resources/types/distributionengineevent.txt deleted file mode 100644 index a261042720..0000000000 --- a/asdctool/src/main/resources/es-resources/types/distributionengineevent.txt +++ /dev/null @@ -1,13 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "consumerId": "CONSUMER_ID", - "role": "ROLE", - "topicName": "TOPIC_NAME", - "apiKey": "API_KEY", - "environmentName": "D_ENV" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/distributionnotificationevent.txt b/asdctool/src/main/resources/es-resources/types/distributionnotificationevent.txt deleted file mode 100644 index 6375ead9bb..0000000000 --- a/asdctool/src/main/resources/es-resources/types/distributionnotificationevent.txt +++ /dev/null @@ -1,16 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "currVersion": "CURR_VERSION", - "currState": "CURR_STATE", - "distributionId": "DID", - "modifierName": "MODIFIER_NAME", - "modifierUid": "MODIFIER_UID", - "resourceName": "RESOURCE_NAME", - "resourceType": "RESOURCE_TYPE", - "topicName": "TOPIC_NAME" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/distributionstatusevent.txt b/asdctool/src/main/resources/es-resources/types/distributionstatusevent.txt deleted file mode 100644 index 8fed9dd0c0..0000000000 --- a/asdctool/src/main/resources/es-resources/types/distributionstatusevent.txt +++ /dev/null @@ -1,12 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "resourceUrl": "RESOURCE_URL", - "consumerId": "CONSUMER_ID", - "distributionId": "DID", - "topicName": "TOPIC_NAME" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/resourceadminevent.txt b/asdctool/src/main/resources/es-resources/types/resourceadminevent.txt deleted file mode 100644 index 4631aa3367..0000000000 --- a/asdctool/src/main/resources/es-resources/types/resourceadminevent.txt +++ /dev/null @@ -1,21 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "currVersion": "CURR_VERSION", - "currState": "CURR_STATE", - "distributionId": "DID", - "modifierName": "MODIFIER_NAME", - "modifierUid": "MODIFIER_UID", - "prevVersion": "PREV_VERSION", - "prevState": "PREV_STATE", - "resourceName": "RESOURCE_NAME", - "resourceType": "RESOURCE_TYPE", - "dPrevStatus": "DPREV_STATUS", - "dCurrStatus": "DCURR_STATUS", - "comment": "COMMENT", - "artifactName": "ARTIFACT_NAME" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/useraccessevent.txt b/asdctool/src/main/resources/es-resources/types/useraccessevent.txt deleted file mode 100644 index ebd27b55e3..0000000000 --- a/asdctool/src/main/resources/es-resources/types/useraccessevent.txt +++ /dev/null @@ -1,10 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "userUid": "USER_UID", - "userName": "USER_NAME" -}
\ No newline at end of file diff --git a/asdctool/src/main/resources/es-resources/types/useradminevent.txt b/asdctool/src/main/resources/es-resources/types/useradminevent.txt deleted file mode 100644 index 15e0d9bdca..0000000000 --- a/asdctool/src/main/resources/es-resources/types/useradminevent.txt +++ /dev/null @@ -1,20 +0,0 @@ -{ - "action": "ACTION", - "timestamp": "TIMESTAMP", - "requestId": "REQUEST_ID", - "serviceInstanceId": "SERVICE_INSTANCE_ID", - "desc": "DESC", - "status": "STATUS", - "modifierName": "MODIFIER_NAME", - "modifierUid": "MODIFIER_UID", - "userUid": "USER_UID", - "userName": "USER_NAME", - "userEmail": "USER_EMAIL", - "userRole": "USER_ROLE", - "userBeforeName": "USER_BEFORE_NAME", - "userBeforeEmail": "USER_BEFORE_EMAIL", - "userBeforeRole": "USER_BEFORE_ROLE", - "userAfterName": "USER_AFTER_NAME", - "userAfterEmail": "USER_AFTER_EMAIL", - "userAfterRole": "USER_AFTER_ROLE" -}
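Note: each of the type files above maps an old lower-case audit field name to its new upper-case column name; updateFieldNames in audit_migration_1602.py applies them record by record, roughly as in the sketch below (the sample record is invented).

    import json

    # Load one per-type mapping, e.g. types/useraccessevent.txt
    with open('types/useraccessevent.txt') as f:
        field_names = json.load(f)

    old_record = {'action': 'Access', 'status': '200', 'userUid': 'cs0008'}

    # Rename the fields that appear in the mapping; keep unmapped fields as-is,
    # which is what the migration script does.
    new_record = {}
    for field, value in old_record.items():
        new_record[field_names.get(field, field)] = value

    print(new_record)   # {'ACTION': 'Access', 'STATUS': '200', 'USER_UID': 'cs0008'}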
\ No newline at end of file diff --git a/asdctool/src/main/resources/scripts/esToCassandraMigration.sh b/asdctool/src/main/resources/scripts/esToCassandraMigration.sh deleted file mode 100644 index 383904c661..0000000000 --- a/asdctool/src/main/resources/scripts/esToCassandraMigration.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -CURRENT_DIR=`pwd` -BASEDIR=$(dirname $0) - -if [ ${BASEDIR:0:1} = "/" ] -then - FULL_PATH=$BASEDIR -else - FULL_PATH=$CURRENT_DIR/$BASEDIR -fi - -source ${FULL_PATH}/baseOperation.sh - -mainClass="org.openecomp.sdc.asdctool.main.EsToCassandraDataMigrationMenu" - -command="java $JVM_LOG_FILE -cp $JARS $mainClass es-to-cassndra-migration $@" -echo $command - -$command -result=$? - -echo "***********************************" -echo "***** $result *********************" -echo "***********************************" - -exit $result - - diff --git a/asdctool/src/main/resources/scripts/esToCassandraMigrationExportOnly.sh b/asdctool/src/main/resources/scripts/esToCassandraMigrationExportOnly.sh deleted file mode 100644 index 2c8e346f30..0000000000 --- a/asdctool/src/main/resources/scripts/esToCassandraMigrationExportOnly.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -CURRENT_DIR=`pwd` -BASEDIR=$(dirname $0) - -if [ ${BASEDIR:0:1} = "/" ] -then - FULL_PATH=$BASEDIR -else - FULL_PATH=$CURRENT_DIR/$BASEDIR -fi - -source ${FULL_PATH}/baseOperation.sh - -mainClass="org.openecomp.sdc.asdctool.main.EsToCassandraDataMigrationMenu" - -command="java $JVM_LOG_FILE -cp $JARS $mainClass es-to-cassndra-migration-export-only $@" -echo $command - -$command -result=$? - -echo "***********************************" -echo "***** $result *********************" -echo "***********************************" - -exit $result - - diff --git a/asdctool/src/main/resources/scripts/esToCassandraMigrationImportOnly.sh b/asdctool/src/main/resources/scripts/esToCassandraMigrationImportOnly.sh deleted file mode 100644 index 9ce3ca8aae..0000000000 --- a/asdctool/src/main/resources/scripts/esToCassandraMigrationImportOnly.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -CURRENT_DIR=`pwd` -BASEDIR=$(dirname $0) - -if [ ${BASEDIR:0:1} = "/" ] -then - FULL_PATH=$BASEDIR -else - FULL_PATH=$CURRENT_DIR/$BASEDIR -fi - -source ${FULL_PATH}/baseOperation.sh - -mainClass="org.openecomp.sdc.asdctool.main.EsToCassandraDataMigrationMenu" - -command="java $JVM_LOG_FILE -cp $JARS $mainClass es-to-cassndra-migration-import-only $@" -echo $command - -$command -result=$? - -echo "***********************************" -echo "***** $result *********************" -echo "***********************************" - -exit $result - - diff --git a/asdctool/src/main/resources/scripts/getConsumers.sh b/asdctool/src/main/resources/scripts/getConsumers.sh deleted file mode 100644 index d02aac629d..0000000000 --- a/asdctool/src/main/resources/scripts/getConsumers.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -############################## -# Get list of SDC consumers -############################## - - -CURRENT_DIR=`pwd` -BASEDIR=$(dirname $0) - -if [ ${BASEDIR:0:1} = "/" ] -then - FULL_PATH=$BASEDIR -else - FULL_PATH=$CURRENT_DIR/$BASEDIR -fi - -source ${FULL_PATH}/baseOperation.sh - -mainClass="org.openecomp.sdc.asdctool.main.GetConsumersMenu" - -command="java $JVM_LOG_FILE -Xmx1024M -cp $JARS $mainClass $@" -echo $command - -$command -result=$? 
- - - -echo "***********************************" -echo "***** $result *********************" -echo "***********************************" - -exit $result - - - diff --git a/asdctool/src/main/resources/scripts/python/user/exportUsers.py b/asdctool/src/main/resources/scripts/python/user/exportUsers.py index 9e695ad8fd..ed7515cc3e 100644 --- a/asdctool/src/main/resources/scripts/python/user/exportUsers.py +++ b/asdctool/src/main/resources/scripts/python/user/exportUsers.py @@ -40,7 +40,8 @@ def getUsers(scheme, beHost, bePort, adminUser): c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json', 'Accept: application/json', adminHeader]) if scheme == 'https': - c.setopt(c.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYHOST, 0) res = c.perform() #print(res) diff --git a/asdctool/src/main/resources/scripts/python/user/importUsers.py b/asdctool/src/main/resources/scripts/python/user/importUsers.py index 984b75bd4c..82ddec5139 100644 --- a/asdctool/src/main/resources/scripts/python/user/importUsers.py +++ b/asdctool/src/main/resources/scripts/python/user/importUsers.py @@ -70,7 +70,8 @@ def getUser(scheme, beHost, bePort, user): c.setopt(c.WRITEFUNCTION, lambda x: None) if scheme == 'https': - c.setopt(c.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYHOST, 0) res = c.perform() @@ -111,7 +112,8 @@ def createUser(scheme, beHost, bePort, user, adminUser): c.setopt(c.WRITEFUNCTION, lambda x: None) if scheme == 'https': - c.setopt(c.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYPEER, 0) + c.setopt(pycurl.SSL_VERIFYHOST, 0) #print("before perform") res = c.perform() |
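Note: after this change the user scripts issue HTTPS requests with both peer-certificate and host-name verification switched off, which is only appropriate against the self-signed certificates of lab deployments. A minimal standalone sketch of the resulting pycurl pattern follows; the URL and the USER_ID header value are placeholders, not taken from the scripts.

    import pycurl
    from io import BytesIO

    buf = BytesIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, 'https://localhost:8443/sdc2/rest/v1/user/users')   # placeholder URL
    c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json',
                                 'Accept: application/json',
                                 'USER_ID: jh0003'])                         # placeholder admin id
    # The same two options the patch sets for the https scheme: skip
    # peer-certificate validation and host-name matching.
    c.setopt(pycurl.SSL_VERIFYPEER, 0)
    c.setopt(pycurl.SSL_VERIFYHOST, 0)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    c.perform()
    print(c.getinfo(pycurl.RESPONSE_CODE))
    print(buf.getvalue())
    c.close()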