summaryrefslogtreecommitdiffstats
path: root/src/main/java/org/onap/dmaap/dmf/mr/beans/DMaaPKafkaConsumerFactory.java
blob: 26a8cf4e2d6061f87c1c422043915354c4073c2d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
/*******************************************************************************
 *  ============LICENSE_START=======================================================
 *  org.onap.dmaap
 *  ================================================================================
 *  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 *  ================================================================================
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *        http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *  ============LICENSE_END=========================================================
 *  
 *  ECOMP is a trademark and service mark of AT&T Intellectual Property.
 *  
 *******************************************************************************/
package org.onap.dmaap.dmf.mr.beans;

import com.att.ajsc.filemonitor.AJSCPropertiesMap;
import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;
import com.att.nsa.drumlin.till.nv.rrNvReadable.missingReqdSetting;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.onap.dmaap.dmf.mr.CambriaApiException;
import org.onap.dmaap.dmf.mr.backends.Consumer;
import org.onap.dmaap.dmf.mr.backends.ConsumerFactory;
import org.onap.dmaap.dmf.mr.backends.MetricsSet;
import org.onap.dmaap.dmf.mr.backends.kafka.*;
import org.onap.dmaap.dmf.mr.backends.kafka.KafkaConsumerCache.KafkaConsumerCacheException;
import org.onap.dmaap.dmf.mr.constants.CambriaConstants;
import org.onap.dmaap.dmf.mr.utils.ConfigurationReader;
import org.onap.dmaap.dmf.mr.utils.Utils;
import org.springframework.beans.factory.annotation.Qualifier;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/**
 * @author nilanjana.maity
 *
 */
/**
 * Factory that creates (and optionally caches) Kafka 0.11 consumers for the
 * DMaaP message router. Consumer creation is serialized per (topic, consumer
 * group) and coordinated across API nodes through a Curator/ZooKeeper lock.
 *
 * @author nilanjana.maity
 */
public class DMaaPKafkaConsumerFactory implements ConsumerFactory {

	private static final EELFLogger log = EELFManager.getInstance().getLogger(DMaaPKafkaConsumerFactory.class);

	/** Cache of live Kafka011Consumer instances; null when caching is disabled by configuration. */
	private KafkaConsumerCache fCache;

	/** Live-lock avoider handed to each new consumer; remains null if none was wired in. */
	private KafkaLiveLockAvoider2 fkafkaLiveLockAvoider;

	/** Kafka bootstrap broker list ("host:port[,host:port...]"). */
	private String fkafkaBrokers;

	/**
	 * Monitor objects keyed by (topic + consumerGroup) used to serialize consumer
	 * creation per group. computeIfAbsent on a ConcurrentHashMap removes the
	 * unsynchronized check-then-put race the previous plain HashMap had when two
	 * requests for the same group arrived concurrently.
	 */
	private final ConcurrentHashMap<String, Object> synchash = new ConcurrentHashMap<>();

	/**
	 * Kafka consumer settings that may be overridden from the message-router
	 * property file using a "kafka." prefix (e.g. "kafka.auto.offset.reset").
	 * NOTE(review): the previous list contained duplicate entries
	 * ("fetch.max.bytes", "reconnect.backoff.max.ms") and the non-existent key
	 * "fetch.max.wait.bytes"; the real Kafka consumer setting is
	 * "fetch.max.wait.ms", so that override could never take effect before.
	 */
	private static final String[] KafkaConsumerKeys = { "bootstrap.servers", "heartbeat.interval.ms",
			"auto.offset.reset", "exclude.internal.topics", "session.timeout.ms", "fetch.max.bytes",
			"auto.commit.interval.ms", "connections.max.idle.ms", "fetch.min.bytes", "isolation.level",
			"request.timeout.ms", "fetch.max.wait.ms", "reconnect.backoff.max.ms",
			"max.partition.fetch.bytes", "reconnect.backoff.ms", "retry.backoff.ms",
			"max.poll.interval.ms", "max.poll.records", "receive.buffer.bytes", "metadata.max.age.ms" };

	/**
	 * Builds the factory: resolves this API node's identity and the broker list
	 * from the message-router property file, then (if enabled) starts the
	 * consumer cache and the live-lock-avoidance watcher for this node.
	 *
	 * @param metrics metrics sink handed to the consumer cache
	 * @param curator ZooKeeper client used by the cache for cross-node coordination
	 * @param kafkaLiveLockAvoider optional rebalance live-lock avoider; may be null
	 * @throws missingReqdSetting if a required setting is absent
	 * @throws KafkaConsumerCacheException if the consumer cache fails to start
	 * @throws UnknownHostException if the local host name cannot be resolved
	 */
	public DMaaPKafkaConsumerFactory(@Qualifier("dMaaPMetricsSet") MetricsSet metrics,
			@Qualifier("curator") CuratorFramework curator,
			@Qualifier("kafkalockavoid") KafkaLiveLockAvoider2 kafkaLiveLockAvoider)
			throws missingReqdSetting, KafkaConsumerCacheException, UnknownHostException {

		String apiNodeId = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
				CambriaConstants.kSetting_ApiNodeIdentifier);
		if (apiNodeId == null) {
			// Default identity: canonical host name plus the default API port.
			apiNodeId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + CambriaConstants.kDefault_Port;
		}

		log.info("This Cambria API Node identifies itself as [" + apiNodeId + "].");
		final String mode = CambriaConstants.DMAAP;

		fkafkaBrokers = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
				"kafka.metadata.broker.list");
		if (null == fkafkaBrokers) {
			fkafkaBrokers = "localhost:9092";
		}

		// Consumer caching defaults to kDefault_IsCacheEnabled and can be toggled
		// via the "cambria.consumer.cache.enabled" property.
		boolean cacheEnabled = kDefault_IsCacheEnabled;
		final String cacheEnabledSetting = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
				"cambria.consumer.cache.enabled");
		if (null != cacheEnabledSetting) {
			cacheEnabled = Boolean.parseBoolean(cacheEnabledSetting);
		}

		fCache = cacheEnabled ? KafkaConsumerCache.getInstance() : null;
		if (fCache != null) {
			fCache.setfMetrics(metrics);
			fCache.setfApiId(apiNodeId);
			fCache.startCache(mode, curator);
			if (kafkaLiveLockAvoider != null) {
				kafkaLiveLockAvoider.startNewWatcherForServer(apiNodeId, makeAvoidanceCallback(apiNodeId));
				fkafkaLiveLockAvoider = kafkaLiveLockAvoider;
			}
		}
	}

	/**
	 * Returns a Kafka consumer for the given (topic, group, consumer id), either
	 * from the consumer cache or by creating a new connection under a Curator
	 * inter-process lock so only one API node creates it at a time.
	 *
	 * Used by EventServiceImpl.getEvents(). May return null if creation fails in
	 * the generic-exception path (behavior preserved from the original; callers
	 * must tolerate a null result).
	 *
	 * @see com.att.dmf.mr.backends.ConsumerFactory#getConsumerFor(java.lang.String,
	 *      java.lang.String, java.lang.String, int, java.lang.String)
	 */
	@Override
	public Consumer getConsumerFor(String topic, String consumerGroupName, String consumerId, int timeoutMs,
			String remotehost) throws UnavailableException, CambriaApiException {
		Kafka011Consumer kc;

		// Serialize creation per (topic, consumer group) pair; computeIfAbsent is
		// atomic, so concurrent callers always share the same monitor object.
		final Object syncObject = synchash.computeIfAbsent(topic + consumerGroupName, key -> new Object());

		synchronized (syncObject) {
			try {
				kc = (fCache != null) ? fCache.getConsumerFor(topic, consumerGroupName, consumerId) : null;
			} catch (KafkaConsumerCacheException e) {
				log.info("######@@@@### Error occured in Kafka Caching" + e + "  " + topic + "::" + consumerGroupName
						+ "::" + consumerId);
				log.error("####@@@@## Error occured in Kafka Caching" + e + "  " + topic + "::" + consumerGroupName
						+ "::" + consumerId);
				throw new UnavailableException(e);
			}

			// Cache miss (or cache disabled): create the consumer here, guarded by a
			// cross-node lock so only one API node builds it.
			if (kc == null) {

				log.info("^Kafka consumer cache value " + topic + "::" + consumerGroupName + "::" + consumerId + " =>"
						+ kc);

				final InterProcessMutex ipLock = new InterProcessMutex(ConfigurationReader.getCurator(),
						"/consumerFactory/" + topic + "/" + consumerGroupName + "/" + consumerId);
				boolean locked = false;

				try {
					locked = ipLock.acquire(30, TimeUnit.SECONDS);
					if (!locked) {
						log.info("Could not acquire lock in order to create (topic, group, consumer) = " + "(" + topic
								+ ", " + consumerGroupName + ", " + consumerId + ") from " + remotehost);
						throw new UnavailableException(
								"Could not acquire lock in order to create (topic, group, consumer) = " + "(" + topic
										+ ", " + consumerGroupName + ", " + consumerId + ") " + remotehost);
					}

					log.info("Creating Kafka consumer for group [" + consumerGroupName + "], consumer [" + consumerId
							+ "], on topic [" + topic + "].");

					// Tell the cache this node now owns the consumer before building it.
					if (fCache != null) {
						fCache.signalOwnership(topic, consumerGroupName, consumerId);
					}

					final Properties props = createConsumerConfig(topic, consumerGroupName, consumerId);
					final long fCreateTimeMs = System.currentTimeMillis();
					final KafkaConsumer<String, String> cc = new KafkaConsumer<>(props);
					kc = new Kafka011Consumer(topic, consumerGroupName, consumerId, cc, fkafkaLiveLockAvoider);
					log.info(" kafka stream created in " + (System.currentTimeMillis() - fCreateTimeMs));

					if (fCache != null) {
						fCache.putConsumerFor(topic, consumerGroupName, consumerId, kc);
					}

				} catch (org.I0Itec.zkclient.exception.ZkTimeoutException x) {
					log.info(
							"Kafka consumer couldn't connect to ZK. " + x + " " + consumerGroupName + "/" + consumerId);
					throw new UnavailableException("Couldn't connect to ZK.");
				} catch (KafkaConsumerCacheException e) {
					// Caching failed but the consumer itself exists: degrade, don't fail.
					log.info("Failed to cache consumer (this may have performance implications): " + e.getMessage()
							+ " " + consumerGroupName + "/" + consumerId);
				} catch (UnavailableException u) {
					log.info("Failed and in UnavailableException block " + u.getMessage() + " " + consumerGroupName
							+ "/" + consumerId);
					throw new UnavailableException("Error while acquiring consumer factory lock " + u.getMessage(), u);
				} catch (Exception e) {
					// Preserved behavior: any other failure is logged and kc stays null,
					// which is returned to the caller.
					log.info("Failed and go to Exception block " + e.getMessage() + " " + consumerGroupName + "/"
							+ consumerId);
					log.error("Failed and go to Exception block " + e.getMessage() + " " + consumerGroupName + "/"
							+ consumerId);
				} finally {
					if (locked) {
						try {
							ipLock.release();
						} catch (Exception e) {
							log.error("Error while releasing consumer factory lock", e);
						}
					}
				}
			}
		}
		return kc;
	}

	/** Drops the cached consumer for the given (topic, group, client), if caching is enabled. */
	@Override
	public synchronized void destroyConsumer(String topic, String consumerGroup, String clientId) {
		if (fCache != null) {
			fCache.dropConsumer(topic, consumerGroup, clientId);
		}
	}

	/**
	 * Returns all currently cached consumers.
	 * NOTE(review): throws NullPointerException when caching is disabled (fCache
	 * is null) — preserved from the original; confirm callers only use this with
	 * caching on.
	 */
	@Override
	public synchronized Collection<? extends Consumer> getConsumers() {
		return fCache.getConsumers();
	}

	/**
	 * Drops every cached consumer.
	 * NOTE(review): same null-cache caveat as getConsumers().
	 */
	@Override
	public synchronized void dropCache() {
		fCache.dropAllConsumers();
	}

	/** Prefixes a short settings key, e.g. ("auto.offset.reset", "kafka") -> "kafka.auto.offset.reset". */
	private static String makeLongKey(String key, String prefix) {
		return prefix + "." + key;
	}

	/**
	 * Copies a configured override (prefix.key from the message-router property
	 * file) into the target Properties, if the setting is present.
	 */
	private void transferSettingIfProvided(Properties target, String key, String prefix) {
		final String keyVal = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop, makeLongKey(key, prefix));
		if (null != keyVal) {
			log.info("Setting [" + key + "] to " + keyVal + ".");
			target.put(key, keyVal);
		}
	}

	/**
	 * Builds the Properties needed to create a Kafka connection.
	 *
	 * The group name is replaced with the "groupId--topic" format so that the
	 * same group id used on multiple topics yields distinct Kafka groups;
	 * sharing one group id across topics can trigger frequent consumer
	 * rebalancing on all of them (fix for CPFMF-644 — do not change this logic).
	 *
	 * @param topic topic the consumer will read
	 * @param groupId caller-supplied consumer group id
	 * @param consumerId client id for this consumer
	 * @return Properties suitable for constructing a KafkaConsumer
	 */
	private Properties createConsumerConfig(String topic, String groupId, String consumerId) {
		final Properties props = new Properties();
		final String fakeGroupName = groupId + "--" + topic;
		props.put("group.id", fakeGroupName);
		props.put("enable.auto.commit", "false"); // 0.11
		props.put("bootstrap.servers", fkafkaBrokers);
		if (Utils.isCadiEnabled()) {
			props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='"+Utils.getKafkaproperty()+"';");
			props.put("security.protocol", "SASL_PLAINTEXT");
			props.put("sasl.mechanism", "PLAIN");
		}
		props.put("client.id", consumerId);

		// Additional settings: start with our defaults, then pull in configured overrides.
		populateKafkaInternalDefaultsMap();
		for (String key : KafkaConsumerKeys) {
			transferSettingIfProvided(props, key, "kafka");
		}

		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		return props;
	}

	/** Intentionally a no-op; retained as an extension point for internal defaults. */
	private static void populateKafkaInternalDefaultsMap() { }

	/**
	 * Builds the live-lock-avoidance callback for this API node: when a
	 * rebalance unlock is signaled for a group, force an immediate poll on the
	 * matching local consumers.
	 */
	private LiveLockAvoidance makeAvoidanceCallback(final String appId) {

		return new LiveLockAvoidance() {

			@Override
			public String getAppId() {
				return appId;
			}

			@Override
			public void handleRebalanceUnlock(String groupName) {
				log.info("FORCE A POLL NOW FOR appId: [{}] group: [{}]", getAppId(), groupName);
				Kafka011ConsumerUtil.forcePollOnConsumer(groupName + "::");
			}

		};

	}

	/** Not implemented; present only to satisfy the ConsumerFactory interface. Always returns null. */
	@SuppressWarnings("rawtypes")
	@Override
	public HashMap getConsumerForKafka011(String topic, String consumerGroupName, String consumerId, int timeoutMs,
			String remotehost) throws UnavailableException, CambriaApiException {
		return null;
	}

}