// bytecode/InstructionStream.h
/*
 * Copyright (C) 2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 25  
 26  
 27  #pragma once
 28  
 29  #include "BytecodeIndex.h"
 30  #include "Instruction.h"
 31  #include <wtf/Vector.h>
 32  
 33  namespace JSC {
 34  
 35  DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(InstructionStream);
 36  
// An immutable, finalized stream of variable-width bytecode instructions,
// stored as a flat byte buffer. Positions within the stream are plain byte
// offsets; Ref/MutableRef are lightweight (buffer, offset) cursors that
// reinterpret the bytes at their offset as an Instruction.
class InstructionStream {
    WTF_MAKE_FAST_ALLOCATED;

    friend class InstructionStreamWriter;
    friend class CachedInstructionStream;
public:
    // Raw backing storage: a byte vector with 16 bytes of inline capacity,
    // allocated under the InstructionStream heap identifier declared above.
    using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow, 16, InstructionStreamMalloc>;

    size_t sizeInBytes() const;

    // Byte offset into the instruction buffer.
    using Offset = unsigned;

private:
    // Cursor into an instruction buffer. Templated on the buffer type so the
    // same implementation serves both the read-only view (Ref, instantiated
    // with a const buffer) and the writable view (MutableRef).
    template<class InstructionBuffer>
    class BaseRef {
        WTF_MAKE_FAST_ALLOCATED;

        friend class InstructionStream;

    public:
        BaseRef(const BaseRef<InstructionBuffer>& other)
            : m_instructions(other.m_instructions)
            ,  m_index(other.m_index)
        { }

        // NOTE(review): m_instructions is a *reference* member, so this
        // assignment assigns through the reference (it cannot rebind it).
        // It only compiles when instantiated with a non-const buffer, and
        // appears intended for refs viewing the same underlying buffer —
        // confirm before relying on cross-buffer assignment.
        void operator=(const BaseRef<InstructionBuffer>& other)
        {
            m_instructions = other.m_instructions;
            m_index = other.m_index;
        }

        inline const Instruction* operator->() const { return unwrap(); }
        inline const Instruction* ptr() const { return unwrap(); }

        // Two refs are equal only if they view the same buffer object
        // (compared by address) at the same offset.
        bool operator!=(const BaseRef<InstructionBuffer>& other) const
        {
            return &m_instructions != &other.m_instructions || m_index != other.m_index;
        }

        // Advance past the current instruction; instructions are
        // variable-width, so the step is the current instruction's size.
        BaseRef next() const
        {
            return BaseRef { m_instructions, m_index + ptr()->size() };
        }

        inline Offset offset() const { return m_index; }
        inline BytecodeIndex index() const { return BytecodeIndex(offset()); }

        // A ref one-past-the-end (see InstructionStream::end()) is invalid.
        bool isValid() const
        {
            return m_index < m_instructions.size();
        }

    private:
        // Reinterpret the bytes at the current offset as an Instruction.
        // Only meaningful while isValid() and while the buffer is alive.
        inline const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); }

    protected:
        BaseRef(InstructionBuffer& instructions, size_t index)
            : m_instructions(instructions)
            , m_index(index)
        { }

        InstructionBuffer& m_instructions; // Not owned; outlived by the stream.
        Offset m_index; // Byte offset of the current instruction.
    };

public:
    // Read-only cursor over a finalized stream.
    using Ref = BaseRef<const InstructionBuffer>;

    // Writable cursor, handed out by InstructionStreamWriter. Convertible to
    // (and freezable into) a read-only Ref.
    class MutableRef : public BaseRef<InstructionBuffer> {
        friend class InstructionStreamWriter;

    protected:
        using BaseRef<InstructionBuffer>::BaseRef;

    public:
        Ref freeze() const  { return Ref { m_instructions, m_index }; }
        inline Instruction* operator->() { return unwrap(); }
        inline const Instruction* operator->() const { return unwrap(); }
        inline Instruction* ptr() { return unwrap(); }
        inline const Instruction* ptr() const { return unwrap(); }
        inline operator Ref()
        {
            return Ref { m_instructions, m_index };
        }

    private:
        inline Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); }
        inline const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); }
    };

private:
    // Forward iterator over whole instructions; ++ steps by the current
    // instruction's byte size. Dereferencing end() is not allowed.
    class iterator : public Ref {
        friend class InstructionStream;

    public:
        using Ref::Ref;

        Ref& operator*()
        {
            return *this;
        }

        iterator& operator+=(size_t size)
        {
            m_index += size;
            return *this;
        }

        iterator& operator++()
        {
            return *this += ptr()->size();
        }
    };

public:
    inline iterator begin() const
    {
        return iterator { m_instructions, 0 };
    }

    inline iterator end() const
    {
        return iterator { m_instructions, m_instructions.size() };
    }

    // Random access by bytecode index / byte offset. The offset must point
    // at the start of an instruction; only the bounds are asserted here.
    inline const Ref at(BytecodeIndex index) const { return at(index.offset()); }
    inline const Ref at(Offset offset) const
    {
        ASSERT(offset < m_instructions.size());
        return Ref { m_instructions, offset };
    }

    // Size in bytes, not in instruction count.
    inline size_t size() const
    {
        return m_instructions.size();
    }

    const void* rawPointer() const
    {
        return m_instructions.data();
    }

    bool contains(Instruction*) const;

protected:
    // Streams are only created by InstructionStreamWriter::finalize() and
    // the cached-bytecode decoder (see friend declarations above).
    explicit InstructionStream(InstructionBuffer&&);

    InstructionBuffer m_instructions;
};
186  
187  class InstructionStreamWriter : public InstructionStream {
188      friend class BytecodeRewriter;
189  public:
190      InstructionStreamWriter()
191          : InstructionStream({ })
192      { }
193  
194      void setInstructionBuffer(InstructionBuffer&& buffer)
195      {
196          RELEASE_ASSERT(!m_instructions.size());
197          RELEASE_ASSERT(!buffer.size());
198          m_instructions = WTFMove(buffer);
199      }
200  
201      inline MutableRef ref(Offset offset)
202      {
203          ASSERT(offset < m_instructions.size());
204          return MutableRef { m_instructions, offset };
205      }
206  
207      void seek(unsigned position)
208      {
209          ASSERT(position <= m_instructions.size());
210          m_position = position;
211      }
212  
213      unsigned position()
214      {
215          return m_position;
216      }
217  
218      void write(uint8_t byte)
219      {
220          ASSERT(!m_finalized);
221          if (m_position < m_instructions.size())
222              m_instructions[m_position++] = byte;
223          else {
224              m_instructions.append(byte);
225              m_position++;
226          }
227      }
228  
229      void write(uint16_t h)
230      {
231          ASSERT(!m_finalized);
232          uint8_t bytes[2];
233          std::memcpy(bytes, &h, sizeof(h));
234  
235          // Though not always obvious, we don't have to invert the order of the
236          // bytes written here for CPU(BIG_ENDIAN). This is because the incoming
237          // i value is already ordered in big endian on CPU(BIG_EDNDIAN) platforms.
238          write(bytes[0]);
239          write(bytes[1]);
240      }
241  
242      void write(uint32_t i)
243      {
244          ASSERT(!m_finalized);
245          uint8_t bytes[4];
246          std::memcpy(bytes, &i, sizeof(i));
247  
248          // Though not always obvious, we don't have to invert the order of the
249          // bytes written here for CPU(BIG_ENDIAN). This is because the incoming
250          // i value is already ordered in big endian on CPU(BIG_EDNDIAN) platforms.
251          write(bytes[0]);
252          write(bytes[1]);
253          write(bytes[2]);
254          write(bytes[3]);
255      }
256  
257      void rewind(MutableRef& ref)
258      {
259          ASSERT(ref.offset() < m_instructions.size());
260          m_instructions.shrink(ref.offset());
261          m_position = ref.offset();
262      }
263  
264      std::unique_ptr<InstructionStream> finalize()
265      {
266          m_finalized = true;
267          m_instructions.shrinkToFit();
268          return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) };
269      }
270  
271      std::unique_ptr<InstructionStream> finalize(InstructionBuffer& usedBuffer)
272      {
273          m_finalized = true;
274  
275          InstructionBuffer resultBuffer(m_instructions.size());
276          RELEASE_ASSERT(m_instructions.sizeInBytes() == resultBuffer.sizeInBytes());
277          memcpy(resultBuffer.data(), m_instructions.data(), m_instructions.sizeInBytes());
278  
279          usedBuffer = WTFMove(m_instructions);
280  
281          return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(resultBuffer)) };
282      }
283  
284      MutableRef ref()
285      {
286          return MutableRef { m_instructions, m_position };
287      }
288  
289      void swap(InstructionStreamWriter& other)
290      {
291          std::swap(m_finalized, other.m_finalized);
292          std::swap(m_position, other.m_position);
293          m_instructions.swap(other.m_instructions);
294      }
295  
296  private:
297      class iterator : public MutableRef {
298          friend class InstructionStreamWriter;
299  
300      protected:
301          using MutableRef::MutableRef;
302  
303      public:
304          MutableRef& operator*()
305          {
306              return *this;
307          }
308  
309          iterator& operator+=(size_t size)
310          {
311              m_index += size;
312              return *this;
313          }
314  
315          iterator& operator++()
316          {
317              return *this += ptr()->size();
318          }
319      };
320  
321  public:
322      iterator begin()
323      {
324          return iterator { m_instructions, 0 };
325      }
326  
327      iterator end()
328      {
329          return iterator { m_instructions, m_instructions.size() };
330      }
331  
332  private:
333      unsigned m_position { 0 };
334      bool m_finalized { false };
335  };
336  
337  
338  } // namespace JSC