CloudWatch Logs Integration With Mule CloudHub

In this blog, I will show how to enable AWS CloudWatch logs for your Mule CloudHub application. CloudWatch Logs is an AWS service that lets you manage your logs better, and it is relatively cheap compared to Splunk. Since CloudHub automatically rolls over logs larger than 100 MB, we need a mechanism to manage our logs more effectively. For that, we will create a custom Log4j appender that ships the logs to CloudWatch.

package com.javaroots.appenders;
 
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;
 
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
 
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;
 
import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;
 
@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {
  
 private static final long serialVersionUID = 12321345L;
  
 private static Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);
 
 private final Boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;
 
    /**
     * Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s
     */
    private Object sendMessagesLock = new Object();
 
    /**
     * The queue used to buffer log entries
     */
    private LinkedBlockingQueue<LogEvent> loggingEventsQueue;
 
    /**
     * the AWS Cloudwatch Logs API client
     */
    private AWSLogs awsLogsClient;
 
    private AtomicReference<String> lastSequenceToken = new AtomicReference<>();
 
    /**
     * The AWS Cloudwatch Log group name
     */
    private String logGroupName;
 
    /**
     * The AWS Cloudwatch Log stream name
     */
    private String logStreamName;
 
    /**
     * The queue / buffer size
     */
    private int queueLength = 1024;
 
    /**
     * The maximum number of log entries to send in one go to the AWS Cloudwatch Log service
     */
    private int messagesBatchSize = 128;
 
    private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);
  
 
    private CloudwatchAppender(final String name,
                               final Layout<? extends Serializable> layout,
                               final Filter filter,
                               final boolean ignoreExceptions,
                               final String logGroupName,
                               final String logStreamName,
                               final Integer queueLength,
                               final Integer messagesBatchSize) {
        super(name, filter, layout, ignoreExceptions);
        this.logGroupName = logGroupName;
        this.logStreamName = logStreamName;
        // keep the field defaults if the optional attributes were not configured
        this.queueLength = queueLength != null ? queueLength : this.queueLength;
        this.messagesBatchSize = messagesBatchSize != null ? messagesBatchSize : this.messagesBatchSize;
        this.activateOptions();
    }
 
    @Override
    public void append(LogEvent event) {
        if (cloudwatchAppenderInitialised.get()) {
            loggingEventsQueue.offer(event);
        }
        // otherwise drop the event: the appender failed to initialise
    }
     
    public void activateOptions() {
        if (isBlank(logGroupName) || isBlank(logStreamName)) {
            logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");
            this.stop();
        } else {
          //below lines work with aws version 1.9.40 for local build
          //this.awsLogsClient = new AWSLogsClient();
          //awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));
          this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard().withRegion(Regions.AP_SOUTHEAST_2).build();
            loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);
            try {
                initializeCloudwatchResources();
                initCloudwatchDaemon();
                cloudwatchAppenderInitialised.set(true);
            } catch (Exception e) {
                logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);
                if (DEBUG_MODE) {
                    System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);
                    e.printStackTrace();
                }
            }
        }
    }
     
    private void initCloudwatchDaemon() {
     Thread t = new Thread(() -> {
            while (true) {
                try {
                    if (!loggingEventsQueue.isEmpty()) {
                        sendMessages();
                    }
                    Thread.sleep(20L);
                } catch (InterruptedException e) {
                    if (DEBUG_MODE) {
                        e.printStackTrace();
                    }
                }
            }
        });
     t.setName("CloudwatchThread");
     t.setDaemon(true);
     t.start();
    }
     
    private void sendMessages() {
        synchronized (sendMessagesLock) {
            LogEvent polledLoggingEvent;
            final Layout<? extends Serializable> layout = getLayout();
            List<LogEvent> loggingEvents = new ArrayList<>();
 
            try {
 
                // check the batch size first so that a polled event is never dropped
                while (loggingEvents.size() < messagesBatchSize
                        && (polledLoggingEvent = loggingEventsQueue.poll()) != null) {
                    loggingEvents.add(polledLoggingEvent);
                }
                List<InputLogEvent> inputLogEvents = loggingEvents.stream()
                        .map(loggingEvent -> new InputLogEvent()
                                .withTimestamp(loggingEvent.getTimeMillis())
                                .withMessage(layout == null
                                        ? loggingEvent.getMessage().getFormattedMessage()
                                        : new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8)))
                        .sorted(comparing(InputLogEvent::getTimestamp))
                        .collect(toList());
 
                if (!inputLogEvents.isEmpty()) {
 
                    PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(
                            logGroupName,
                            logStreamName,
                            inputLogEvents);
 
                    try {
                        putLogEventsRequest.setSequenceToken(lastSequenceToken.get());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                    } catch (DataAlreadyAcceptedException dataAlreadyAcceptedException) {
                        putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            dataAlreadyAcceptedException.printStackTrace();
                        }
                    } catch (InvalidSequenceTokenException invalidSequenceTokenException) {
                        putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            invalidSequenceTokenException.printStackTrace();
                        }
                    }
                }
            } catch (Exception e) {
                if (DEBUG_MODE) {
                 logger2.error(" error inserting cloudwatch:",e);
                    e.printStackTrace();
                }
            }
        }
    }
 
    private void initializeCloudwatchResources() {
 
        DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();
        describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);
 
        Optional<LogGroup> logGroupOptional = awsLogsClient
                .describeLogGroups(describeLogGroupsRequest)
                .getLogGroups()
                .stream()
                .filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName))
                .findFirst();
 
        if (!logGroupOptional.isPresent()) {
            CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);
            awsLogsClient.createLogGroup(createLogGroupRequest);
        }
 
        DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest().withLogGroupName(logGroupName).withLogStreamNamePrefix(logStreamName);
 
        Optional<LogStream> logStreamOptional = awsLogsClient
                .describeLogStreams(describeLogStreamsRequest)
                .getLogStreams()
                .stream()
                .filter(logStream -> logStream.getLogStreamName().equals(logStreamName))
                .findFirst();
        if (!logStreamOptional.isPresent()) {
            CreateLogStreamRequest createLogStreamRequest = new CreateLogStreamRequest().withLogGroupName(logGroupName).withLogStreamName(logStreamName);
            awsLogsClient.createLogStream(createLogStreamRequest);
        }
 
    }
     
    private boolean isBlank(String string) {
        return null == string || string.trim().length() == 0;
    }
    protected String getSimpleStacktraceAsString(final Throwable thrown) {
        final StringBuilder stackTraceBuilder = new StringBuilder();
        for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {
            new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",
                    stackTraceElement.getClassName(),
                    stackTraceElement.getMethodName(),
                    stackTraceElement.getFileName(),
                    stackTraceElement.getLineNumber());
        }
        return stackTraceBuilder.toString();
    }
 
    @Override
    public void start() {
        super.start();
    }
 
    @Override
    public void stop() {
        super.stop();
        while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {
            this.sendMessages();
        }
    }
 
    @Override
    public String toString() {
        return CloudwatchAppender.class.getSimpleName() + "{"
                + "name=" + getName()
                + " logGroupName=" + logGroupName
                + " logStreamName=" + logStreamName + "}";
    }
 
    @PluginFactory
    @SuppressWarnings("unused")
    public static CloudwatchAppender createCloudWatchAppender(
            @PluginAttribute(value = "queueLength") Integer queueLength,
            @PluginElement("Layout") Layout<? extends Serializable> layout,
            @PluginAttribute(value = "logGroupName") String logGroupName,
            @PluginAttribute(value = "logStreamName") String logStreamName,
            @PluginAttribute(value = "name") String name,
            @PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,
            @PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize) {
        return new CloudwatchAppender(name, layout, null, ignoreExceptions, logGroupName, logStreamName, queueLength, messagesBatchSize);
    }
}
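
To sanity-check the appender locally before deploying, a minimal test class like the sketch below can be used. The class name and messages are illustrative (not part of the original article); it assumes the log4j2.xml with the CLOUDW appender is on the classpath and that AWS credentials are available through the SDK's default provider chain (environment variables, ~/.aws/credentials, or an instance profile).

package com.javaroots.appenders;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Hypothetical smoke test: emit a few events, then give the appender's
// daemon thread (which polls the queue every 20 ms) time to flush
// before the JVM exits.
public class CloudwatchAppenderSmokeTest {

    private static final Logger LOGGER = LogManager.getLogger(CloudwatchAppenderSmokeTest.class);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 5; i++) {
            LOGGER.info("CloudWatch appender test event #{}", i);
        }
        Thread.sleep(2000L);
    }
}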

We add the required dependencies to our pom.xml file:

<dependency>
   <groupId>com.amazonaws</groupId>
   <artifactId>aws-java-sdk-logs</artifactId>
   <!-- for local 3.8.5 we need to use this version cloudhub 3.8.5 has jackson 2.6.6 -->
   <!-- <version>1.9.40</version> -->
   <version>1.11.105</version>
   <exclusions>
    <exclusion> <!-- declare the exclusion here -->
     <groupId>org.apache.logging.log4j</groupId>
     <artifactId>log4j-1.2-api</artifactId>
    </exclusion>
    <exclusion> <!-- declare the exclusion here -->
     <groupId>com.fasterxml.jackson.core</groupId>
     <artifactId>jackson-core</artifactId>
    </exclusion>
    <exclusion> <!-- declare the exclusion here -->
     <groupId>com.fasterxml.jackson.core</groupId>
     <artifactId>jackson-databind</artifactId>
    </exclusion>
   </exclusions>
  </dependency>
  <dependency>
   <groupId>org.apache.logging.log4j</groupId>
   <artifactId>log4j-api</artifactId>
   <version>2.5</version>
  </dependency>
  <dependency>
   <groupId>org.apache.logging.log4j</groupId>
   <artifactId>log4j-core</artifactId>
   <version>2.5</version>
  </dependency>
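
The comment in the first dependency above notes that CloudHub 3.8.5 bundles Jackson 2.6.6. If you still run into Jackson classpath conflicts on that runtime, one option (an assumption on my part, not from the original setup) is to pin the Jackson artifacts explicitly to the runtime's version:

  <!-- Hypothetical: align Jackson with the version bundled in the
       CloudHub 3.8.5 runtime to avoid class version conflicts. -->
  <dependency>
   <groupId>com.fasterxml.jackson.core</groupId>
   <artifactId>jackson-core</artifactId>
   <version>2.6.6</version>
  </dependency>
  <dependency>
   <groupId>com.fasterxml.jackson.core</groupId>
   <artifactId>jackson-databind</artifactId>
   <version>2.6.6</version>
  </dependency>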

Now we need to modify our log4j2.xml: add the custom CloudWatch appender along with the CloudHub log appender, so that we still get the logs on CloudHub as well.

<?xml version="1.0" encoding="utf-8"?>
<Configuration status="trace" packages="com.javaroots.appenders,com.mulesoft.ch.logging.appender">
 
 <!-- These are some of the loggers you can enable.
      There are several more listed in the documentation.
      Besides this log4j configuration, you can also use Java VM arguments
      to enable other logs, like network (-Djavax.net.debug=ssl or all) and
      Garbage Collector (-XX:+PrintGC). These are appended to the console, so you will
      see them in the mule_ee.log file. -->
 
 
    <Appenders>
        <CLOUDW name="CloudW" logGroupName="test-log-stream"
                logStreamName="test44"
                messagesBatchSize="${sys:cloudwatch.msg.batch.size}"
                queueLength="${sys:cloudwatch.queue.length}">
            <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
        </CLOUDW>
   
  <Log4J2CloudhubLogAppender name="CLOUDHUB"
                                   addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                                   applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                                   appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                                   appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                                   batchSendIntervalMs="${sys:logging.batchSendInterval}"
                                   batchMaxRecords="${sys:logging.batchMaxRecords}"
                                   memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                                   journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                                   journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                                   clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                                   clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                                   clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                                   serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                                   serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                                   statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">
 
            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
         
    </Appenders>
    <Loggers>
      
      
  <!-- Http Logger shows wire traffic on DEBUG -->
  <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
  
  <!-- JDBC Logger shows queries and parameters values on DEBUG -->
  <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
     
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>
 
        <!-- Apache Commons tend to make a lot of noise which can clutter the log-->
        <AsyncLogger name="org.apache" level="WARN"/>
 
        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>
 
        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>
 
        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>
         
        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>
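
Note that messagesBatchSize and queueLength are resolved from the system properties cloudwatch.msg.batch.size and cloudwatch.queue.length, so they must be supplied at deployment time, either in the application's Properties tab in Runtime Manager or as JVM arguments. The values below are illustrative (they match the defaults hard-coded in the appender), not from the original article:

cloudwatch.msg.batch.size=128
cloudwatch.queue.length=1024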

Finally, we need to disable the default CloudHub logs in CloudHub Runtime Manager.

This works with CloudHub Mule runtime version 3.8.4. There are some issues on CloudHub version 3.8.5, where the appender initializes correctly and sends the logs, but the events and messages are missing.

Published on Java Code Geeks with permission by Abhishek Somani, partner at our JCG program. See the original article here: CloudWatch Logs Integration With Mule CloudHub

The opinions expressed by Java Code Geeks contributors are their own.