Category Archives: IT

C++ SQL-like Select example (imperfect)

I just would like to keep it here…

Maybe there is a better implementation? I spent 30 minutes on it and have no more time today.

// select.cpp

#include <functional>
#include <iostream>
#include <vector>

  template<
      typename OutputCollectionT,
      typename OutputItemT = typename OutputCollectionT::value_type
  >
  class Select {
  public:
      template<
          typename InputCollectionT,
          typename InputItemT = typename InputCollectionT::value_type
      >
      class From {
      public:
          static OutputCollectionT Do(
              const InputCollectionT &Input,
              std::function<OutputItemT(const InputItemT &In)> Selector
          ) {
            OutputCollectionT Out;
            for (const InputItemT &In : Input) {
              Out.push_back(Selector(In));
            }
            return Out;
          }
      };
  };

struct A {
    int P1;
    int P2;
    int P3;
};

struct B {
    int P1;
    int P3;
};

int main() {
  std::vector<A> aa = {
      {1, 2, 3},
      {2, 3, 4},
      {3, 4, 5}
  };

  auto bb = Select<std::vector<B>>::From<std::vector<A>>::Do(
      aa, [] (const A &a) -> B {
        return { a.P1, a.P3 };
      }
  );

  for (auto &b : bb) {
    std::cout
    << "P1:" << b.P1 << ", "
    << "P3:" << b.P3 << "\n";
  }

  return 0;
}
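
For comparison, here is a possibly simpler implementation of the same query using std::transform from <algorithm>; a minimal sketch reusing the A and B structs above (my addition, not part of the original snippet):

// select-transform.cpp (sketch)

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

struct A { int P1; int P2; int P3; };
struct B { int P1; int P3; };

int main() {
  std::vector<A> aa = {{1, 2, 3}, {2, 3, 4}, {3, 4, 5}};

  // std::transform plays the role of Select<...>::From<...>::Do.
  std::vector<B> bb;
  bb.reserve(aa.size());
  std::transform(aa.begin(), aa.end(), std::back_inserter(bb),
                 [](const A &a) -> B { return {a.P1, a.P3}; });

  for (const auto &b : bb)
    std::cout << "P1:" << b.P1 << ", P3:" << b.P3 << "\n";

  return 0;
}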

Edge detection shader for text

Hi there! I’m working on text rendering for my small Bird OSD project.

I want to add contours to the text so that it stays visible on whatever background it is rendered against (bright or dark).

For example, I want to enhance text rendering for cases like this:

(Ugh… My eyes suffer!)

Into this one:

Assuming the input is a one-component color consisting only of an alpha channel, I want to mark alpha values around 0.5 as edges.

Below is my shader, which works; in fact, the screenshots above were produced with it. It still has some limitations, though: the edge radius can't take values ending in .5, due to a special rounding case for N*0.5 values. If you use it and find more issues, please let me know.

edge-detection.glsl
// The input texture
uniform sampler2D uTexture;
varying vec2 vTexCoord;  // Interpolated texture coordinate per fragment.

uniform float uOpacity;

uniform float uWidth;

uniform float uHeight;

// If the foreground value is higher than this threshold, the edge is zero for this pixel.
const float NO_EDGE_THRESHOLD = 0.5;

// Edge radius; works fine in the range from 0.6 to 2.0.
// Please don't use N*0.5 values: they hit special rounding rules,
// and the pixel on the left may end up at a different distance than the one on the right.
const float EDGE_RADIUS = 1.;

const vec3 EDGE = vec3(0., 0., 0.);

// Detects whether we should put an edge value at the center.
// Note that if the center foreground value is 1, then there is
// no need for an edge
// (in practice we also admit some values below 1,
// determined by the threshold).
// We work with fonts, not regular images, so the
// edge is a function of the average of two pixels (not their difference):
// f(l, r) = edge((l + r) / 2)
//    assuming edge should be at its maximum when the input value is "k" (in the range (0, 1)).
// So how is the 'edge' function defined?
// (see the picture if the formulas are hard to read)
//    edge(v) = (1./k) * v, if v <= k && v > 0;
//              otherwise it is the line through p1[x,y] = [k, 1] and p2[x,y] = [1, 0]
//
//  Y ^
//    | p[y=1,x=k]
//    |  /\
//    | /  \
//    |/    \
//    ----------->
//    0  k  1    X
//    y = edge(x)
//
// in the case when k = 0.5 we get
//
// f(l, r) = 1 - |l + r - 1|
//
// Let's use this case!
//
// params:
//    left - left foreground value
//    center - center foreground value
//    right - right foreground value
// returns:
//    edge value.
//
float getEdge(float left, float center, float right) {
    if (center > NO_EDGE_THRESHOLD)
        return 0.;

    if (center > left && center > right)
        return 0.;

    float ledge = 1. - abs(left + center - 1.);
    float redge = 1. - abs(right + center - 1.);

    return max(ledge, redge);
}

float getNeighbour(float row, float col) {
    float dx = EDGE_RADIUS / uWidth;
    float dy = EDGE_RADIUS / uHeight;

    float texX = clamp(vTexCoord.x + col * dx, 0. + dx/2., 1. - dx/2.);
    float texY = clamp(vTexCoord.y + row * dy, 0. + dy/2., 1. - dy/2.);

    return texture2D(uTexture, vec2(texX, texY)).a;
}

float calcEdge(float centerValue) {
    // Neighbour pixels:
    // neighbour[i][j] is the neighbour with X = x + (j-1) * DX; Y = y + (i-1) * DY;
    // e.g. neighbour[0][0] is the neighbour with X = x - DX; Y = y - DY.
    float neighbour_0[3];
    float neighbour_1[3];
    float neighbour_2[3];

    for (int j = 0; j != 3; ++j)
        neighbour_0[j] = getNeighbour(-1., float(j-1));

    for (int j = 0; j != 3; ++j)
        neighbour_1[j] = getNeighbour(0., float(j-1));

    for (int j = 0; j != 3; ++j)
        neighbour_2[j] = getNeighbour(1., float(j-1));

    float horEdge = getEdge(neighbour_1[0], centerValue, neighbour_1[2]);
    float vertEdge = getEdge(neighbour_0[1], centerValue, neighbour_2[1]);
    float ltrbEdge = getEdge(neighbour_0[0], centerValue, neighbour_2[2]);
    float rtlbEdge = getEdge(neighbour_0[2], centerValue, neighbour_2[0]);

    return max( max(horEdge, vertEdge), max(ltrbEdge, rtlbEdge) );
}

vec4 calcFinalValue(vec3 foreground, float foregroundValue, float edgeValue) {
#if 1
    // Guard against division by zero on fully transparent pixels.
    float sumFgEdge = max(foregroundValue + edgeValue, 1e-6);
    vec3 color = vec3(
        foreground * foregroundValue / sumFgEdge +
        EDGE * edgeValue / sumFgEdge);

    return vec4(color.r, color.g, color.b, min(sumFgEdge, 1. * uOpacity));

    //return vec4(EDGE, edgeValue);
#else
    return vec4(foreground.r, foreground.g, foreground.b, foregroundValue);
#endif
}

// The entry point for our fragment shader.
void main()
{
    vec4 texColor = texture2D(uTexture, vTexCoord);

    float foregroundValue = texColor.a;
    float edgeValue = calcEdge(foregroundValue);

    gl_FragColor = calcFinalValue(
        vec3(texColor.r, texColor.g, texColor.b),
        foregroundValue,
        edgeValue
    );
}
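
As a quick sanity check of the k = 0.5 case derived in the comments above (f(l, r) = 1 - |l + r - 1|), here is a small host-side C++ snippet comparing it against the piecewise definition of edge(); it is an illustration only, not part of the shader:

// edge-check.cpp (illustration, not part of the shader)
#include <cmath>
#include <cstdio>

// Piecewise edge() for an arbitrary peak position k, as in the shader comments.
float edge(float v, float k) {
    if (v <= 0.f || v >= 1.f) return 0.f;
    return v <= k ? v / k                   // rising line through (0,0) and (k,1)
                  : (1.f - v) / (1.f - k);  // falling line through (k,1) and (1,0)
}

int main() {
    const float k = 0.5f;
    for (float l = 0.f; l <= 1.f; l += 0.25f)
        for (float r = 0.f; r <= 1.f; r += 0.25f)
            std::printf("l=%.2f r=%.2f piecewise=%.2f closed-form=%.2f\n",
                        l, r, edge((l + r) / 2.f, k),
                        1.f - std::fabs(l + r - 1.f));
    return 0;
}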

The Bird Project

Bird OSD

Introduction

Do you have a Raspberry Camera and need FPV, but can't fight the 100-200 ms latency? Then there is a solution.

Bird OSD turns your Raspberry Pi into an FPV stream source with an OSD overlay.

How?

Since the Raspberry Pi has a composite video output, you can broadcast its screen just like a regular FPV signal over an FPV transmitter module!

The Raspberry Pi runs on a Broadcom SoC with a VideoCore processor, which means we can apply the OSD overlay to the camera stream with really low real-time latency.

An X server is not required.

Bird OSD is a systemd service: it uses the raspivid app to grab the camera image, and its own bird-osd GLES2 application to apply an overlay with sensor data on top of it.

So finally you should see something like this:

(GPS was broken, sorry; I still can't demonstrate it in a real flight)

Another pic from FPV goggles:

Prerequisites

  1. An RPi device with a sensors board (Navio2 is OK)
  2. A Raspberry Camera connected to it
  3. Something sending MAVLink data to 127.0.0.1:14550 (running ardupilot, arducopter, whatever)

How to install

Download the .deb package onto your Raspberry device:

$ wget http://ppa.dyatkovskiy.com/raspbian/pool/main/b/bird-osd/bird-osd_1.1.2_armhf.deb

And then install it:

$ sudo dpkg -i bird-osd_1.1.2_armhf.deb

Then you should point a MAVLink channel at 127.0.0.1:14550.

E.g. for arducopter:

$ sudo nano /etc/default/arducopter 

Ensure you have a line like this:

TELEM1="-A udp:127.0.0.1:14550"

Or like this:

TELEM2="-C udp:127.0.0.1:14550"

If you modified the /etc/default/arducopter config, you should restart the service:

$ sudo systemctl restart arducopter

Finally, you should start the bird-osd service with this command:

$ sudo systemctl start bird-osd

Then, on a monitor connected to your Raspberry, you should see whatever your camera sees, plus an overlay with sensor data!

It is still a very first version:

  1. I have only tested it on an RPi 3. I added dependencies on raspivid and on bash:
    libraspberrypi-bin (>= 1.20180417-1), bash (>= 4.4-5)

    Perhaps the dependency versions are higher than really needed; I just had no opportunity to test it in other environments.

  2. Don't blame me, guys, for not opening the sources. They are such a mess, I need to sort them out first.
  3. It still consumes too much CPU time. After the holidays I'll work a bit on optimizations. It uses a text atlas, but still builds the text layout dynamically. It should render every static text to a texture; per my profiling results, that should improve performance by 30-40% (since most text labels are static).
  4. Any proposals are welcome.

How to enable or disable the service

If you want to enable bird-osd on boot, you should run:

$ sudo systemctl enable bird-osd

And this command disables the service:

$ sudo systemctl disable bird-osd

How to uninstall

And this command removes bird-osd from your Raspberry device:

$ sudo dpkg -P bird-osd

Relevant topics

Edge detection shader for text – a simple edge detection shader for text-like foreground drawings


Cross compilation for Raspberry from Sierra

In short

If you need to compile something for the Raspberry, just run this:

path/to/clang --target=arm-linux-gnueabihf --sysroot=/some/path/arm-linux-gnueabihf/sysroot my-happy-program.c  -fuse-ld=lld

In the command above, “arm-linux-gnueabihf” is my target triple.

If you don't like LLVM or just need GCC, read Yuzhou Cheng's article, or search the net for something like “cross compilation for raspberry”; this may help. Below, we describe how to do it with LLVM.

Disclaimer

We assume the reader knows how to deal with the command line. If not, don't worry; it's OK not to know some things. Feel free to ask questions in the comments.

Let’s start

Root FS

Of course, you still need a rootfs. You may also need the GCC binutils, though perhaps you would prefer the ones provided by the LLVM infrastructure. Either way, you don't have to build the rootfs; just get it, e.g. from a Linux package. Actually, I'm still looking for a way to make it enough to simply mount my Raspberry rootfs.

How to get LLVM

At the moment there are precompiled binaries for macOS (go to the “Pre-Built Binaries” section):

http://releases.llvm.org/download.html

Or, for version 7.0.0, you may run this in a terminal:

$ wget http://releases.llvm.org/7.0.0/clang+llvm-7.0.0-x86_64-apple-darwin.tar.xz

Compiling LLVM from sources

Don't worry, this is a bit different from building GCC. The difference is the statistical fact that the build is usually successful, and you can really go and drink a cup of coffee.

Prerequisites

Below are a few brew commands which add all the dependencies we need.

$ brew install swig
$ brew install cmake

Get sources

Get LLVM, Clang, LLD and LLDB sources, once again same link:

http://releases.llvm.org/download.html

Or for 7.0.0:
LLVM
Clang
LLD
LLDB

1. Extract LLVM sources.

2. Extract LLD into llvm/tools/lld

3. Extract LLDB into llvm/tools/lldb

4. The trickiest part: lldb needs to be code-signed. There is an article describing how to do that; you can find it in your lldb sources dir, in lldb/docs/code-signing.txt.

5. Create some binary dir, let's say “llvm.darwin-x86_64”, and cd into it.

6. Compile

cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release <path to llvm sources>

make -j<num-parallel jobs, for me it is 8>

7. Test it.

make -j8 check

8. Use it!

Post scriptum

Optionally, you may use the legacy binutils. In that case, install them with brew:

$ brew install arm-linux-gnueabihf-binutils

But I prefer to use a single solution.

CMake Toolchain

Below is my CMake toolchain file, which uses clang (built from sources). I hope it will be useful for you.

toolchain.cmake

SET(CMAKE_SYSTEM_VERSION 1)
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)

# Custom toolchain-specific definitions for your project
set(PLATFORM_ARM "1")
set(PLATFORM_COMPILE_DEFS "COMPILE_GLES")

# There we go!
# Below, we specify the toolchain itself!

SET(TARGET_TRIPLE arm-linux-gnueabihf)

# Specify your target rootfs mount point on your compiler host machine
SET(TARGET_ROOTFS /Volumes/rootfs-${TARGET_TRIPLE})

# Specify clang paths
SET(LLVM_DIR /Users/stepan/projects/shared/toolchains/llvm-7.0.darwin-release-x86_64/install)
SET(CLANG ${LLVM_DIR}/bin/clang)
SET(CLANGXX ${LLVM_DIR}/bin/clang++)

# Specify compiler (which is clang)
SET(CMAKE_C_COMPILER   ${CLANG})
SET(CMAKE_CXX_COMPILER ${CLANGXX})

# Specify binutils

SET (CMAKE_AR      "${LLVM_DIR}/bin/llvm-ar" CACHE FILEPATH "Archiver")
SET (CMAKE_LINKER  "${LLVM_DIR}/bin/llvm-ld" CACHE FILEPATH "Linker")
SET (CMAKE_NM      "${LLVM_DIR}/bin/llvm-nm" CACHE FILEPATH "NM")
SET (CMAKE_OBJDUMP "${LLVM_DIR}/bin/llvm-objdump" CACHE FILEPATH "Objdump")
SET (CMAKE_RANLIB  "${LLVM_DIR}/bin/llvm-ranlib" CACHE FILEPATH "ranlib")

# You may use legacy binutils though.
#SET(BINUTILS /usr/local/Cellar/arm-linux-gnueabihf-binutils/2.31.1)
#SET (CMAKE_AR      "${BINUTILS}/bin/${TARGET_TRIPLE}-ar" CACHE FILEPATH "Archiver")
#SET (CMAKE_LINKER  "${BINUTILS}/bin/${TARGET_TRIPLE}-ld" CACHE FILEPATH "Linker")
#SET (CMAKE_NM      "${BINUTILS}/bin/${TARGET_TRIPLE}-nm" CACHE FILEPATH "NM")
#SET (CMAKE_OBJDUMP "${BINUTILS}/bin/${TARGET_TRIPLE}-objdump" CACHE FILEPATH "Objdump")
#SET (CMAKE_RANLIB  "${BINUTILS}/bin/${TARGET_TRIPLE}-ranlib" CACHE FILEPATH "ranlib")

# Specify sysroot (almost same as rootfs)
SET(CMAKE_SYSROOT ${TARGET_ROOTFS})
SET(CMAKE_FIND_ROOT_PATH ${TARGET_ROOTFS})

# Specify lookup methods for cmake
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Sometimes you also need this:
# set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)

# Specify raspberry triple
set(CROSS_FLAGS "--target=${TARGET_TRIPLE}")

# Specify other raspberry related flags
set(RASP_FLAGS "-D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS")

# Gather and distribute flags specified at prev steps.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CROSS_FLAGS} ${RASP_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CROSS_FLAGS} ${RASP_FLAGS}")

# Use the clang/LLD linker. Why?
# Well, you may install custom arm-linux-gnueabihf binutils,
# but then you would also need to recompile clang with a customized triple;
# otherwise clang will try to use the host 'ld' for linking.
# So... use the LLD linker.
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld")

Note: with the unquoted form set(CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld) you sometimes need to run “cmake” twice, because the first run gives you this:

error: invalid linker name in argument '-fuse-ld=lld;-fuse-ld=lld'

That happens because set() with several arguments builds a semicolon-separated CMake list out of them; quoting the value, as above, avoids it.
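
To use the toolchain file, pass it to cmake when configuring your project (assuming you saved it as toolchain.cmake):

$ cmake -DCMAKE_TOOLCHAIN_FILE=/path/to/toolchain.cmake <path to your project sources>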

Ok, that’s it.

Message me if you get stuck, dude; I'm still on it and will try to help!


Machine FP partial invariance issue

Invariance issue

In computer representation:

“a + b + c” and “a + c + b” are not the same!
(and the same holds for multiplication).

Hallelujah! I finally got that simple fact, after so many years of working in the IT industry and software development! Well, I kind of knew this, but never took it seriously until recently.
If you guys are curious how the ape dealt with the getting-the-banana task, and if you are as late to this as I am, read below.

Floating point machine representation

Usually a floating point number is represented as follows:
v = m * b^e

Where

m – is the mantissa, an integer with a limited range. For example, for decimal numbers it could be in the range from 0 to 99. For 24-bit binary numbers it is in the range from 0 to 2^24 - 1, or from 0 to 16777215.
b – is the base, usually b = 2, an integer value,
e – is the exponent, an integer; it can take both negative and positive values.
For example, in decimal representation 0.5 is stored as:
0.5 = 5 * 10^-1 (here m=5, b=10, e=-1)
For binary numbers 0.5 is 2^-1 (m=1, b=2, e=-1)
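
If you want to poke at the machine representation directly, std::frexp from <cmath> splits a value into a normalized fraction and a power-of-two exponent; a tiny C++ illustration (my addition, not from the formulas above):

// fp-repr.cpp (illustration)
#include <cmath>
#include <cstdio>

int main() {
    int e;
    // frexp returns f in [0.5, 1) such that v = f * 2^e.
    float f = std::frexp(6.0f, &e);
    std::printf("6.0 = %g * 2^%d\n", f, e);   // 6.0 = 0.75 * 2^3
    f = std::frexp(0.5f, &e);
    std::printf("0.5 = %g * 2^%d\n", f, e);   // 0.5 = 0.5 * 2^0
    return 0;
}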

Some people know that in order to store bigger numbers we need more space in memory. But higher precision also requires more memory: we need a mantissa of greater width, and thus more bits to store it.

Integer vs float

While working with regular integer numbers we also have data loss and overflow issues, and yet we're able to control them: we keep in mind the minimum and maximum possible integer results, and thus know when overflow might happen.
Floating point numbers are different. AFAIK, no sane person controls mantissa overflow, except perhaps in some really rare cases. So here it is better to assume it just happens all the time.

Inevitable data loss

It is impossible to store numbers with infinite precision, and thus data loss is inevitable. It's obvious, but easy to miss if you have never dealt with such cases.
We can't work with the exact real number “N”…
We are only able to work with its nearest machine floating point representation, fp(N), or:
N* = fp(N)

For a mantissa in the range 0 .. 999 we get the following errors.
The number 9999 will be stored as
v = fp(9999) = 999e+1 = 9990
(here we lost the rightmost “9”)

and the number 1.001 will be stored just as
v = fp(1.001) = 1
(here we lost the rightmost “1”)

a + b + c

Actually v = a + b + c is performed in two steps:
Step 1: x = a + b
Step 2: v = x + c
Or with respect to fp transformation:
Step 1: x = fp(a + b)
Step 2: v = fp(x + c)
By changing the order of the summands, we in fact change what we're going to lose at each step. So, by swapping b and c, we get different data loss and, likewise, a different final result.

Examples

Let's demonstrate it with the following example:
  • the mantissa can store up to 2 decimal digits, and is thus in the range 0 .. 99,
  • the base is 10,
  • the exponent can be anything; it doesn't really matter here.
Let’s use values:
a = 99 (m=99, e = 0)
b = 10 (m=1, e = 1)
c = 1 (m=1, e = 0)
And consider the difference of “a+b+c” and “a+c+b”:
a + b +c:
fp(a+b) = fp(99+10) = fp(109) = 100
v = fp( fp(a+b) + c ) = fp(100 + 1) = fp(101) = 100

a + c + b:
fp(a+c) = fp(99+1) = fp(100) = 100
v = fp( fp(a+c) + b ) = fp(100 + 10) = fp(110) = 110
Unbelievable for regular people, but so obvious to programmers (and yet unbelievable):
(a + b + c = 100) ≠ (a + c + b = 110)

Well, to be more correct:
( fp(a + b + c) = 100 ) ≠ ( fp(a + c + b) = 110)

Upd:

As one possible solution, a wider mantissa should be used for the result, and only after all the operands have been accumulated may the result be truncated to an fp number with a narrower mantissa (see the sketch after this list).
If the items have an N-bit mantissa, then

  • for a sum of M+1 items the result should have an (M+N)-bit mantissa,
  • for a multiplication of M items the result should have an (M*N)-bit mantissa.
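
For instance, a float has a 24-bit mantissa, so for short sums a double (53-bit mantissa) is already "wide enough"; a minimal C++ sketch of the idea (my addition), using the same values as the demo below:

// wide-accumulate.cpp (sketch)
#include <cstdio>

int main() {
    float a = 2.f, b = 16777214.f, c = 16777215.f;

    // Naive float accumulation: the order changes the result.
    float f1 = a + b + c;
    float f2 = a + c + b;

    // Accumulate in double (wider mantissa), truncate once at the end.
    float d1 = (float)((double)a + (double)b + (double)c);
    float d2 = (float)((double)a + (double)c + (double)b);

    std::printf("float order-dependent: %.1f vs %.1f\n", f1, f2); // differ
    std::printf("double then truncated: %.1f vs %.1f\n", d1, d2); // agree
    return 0;
}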

A real example, written in C, is below.


example.c

#include <stdio.h>

// Helpers declaration, for implementation scroll down
float getAllOnes(unsigned bits);
unsigned getMantissaBits();

int main() {

    // Determine the mantissa size in bits
    unsigned mantissaBits = getMantissaBits();

    // If the mantissa had only 3 bits, we would then need:
    // a = 0b10  (m=1, e=1)
    // b = 0b110 (m=11, e=1)
    // c = 0b111 (m=111, e=0)

    float a = 2,
          b = getAllOnes(mantissaBits) - 1,
          c = b + 1;

    float ab = a + b;
    float ac = a + c;

    float abc = a + b + c;
    float acb = a + c + b;

printf("n"
"FP partial invariance issue demo:n"
"n"
"mantissasa size = %i bitsn"
"n"
"a = %.1fn"
"b = %.1fn"
"c = %.1fn"
"(a+b) result: %.1fn"
"(a+c) result: %.1fn"
"(a + b + c) result: %.1fn"
"(a + c + b) result: %.1fn"
"---------------------------------n"
"diff(a + b + c, a + c + b) = %.1fnn",
mantissasaBits,
a, b, c,
ab, ac,
abc, acb,
abc - acb);

return 1;
}

// Helpers

float getAllOnes(unsigned bits) {
    return (float)((1u << bits) - 1);
}

unsigned getMantissaBits() {

    unsigned sz = 1;
    unsigned unbelievablyHugeSize = 1024;
    float allOnes = 1;

    for (; sz != unbelievablyHugeSize &&
           allOnes + 1 != allOnes;
           allOnes = getAllOnes(++sz)
    ) {}

    return sz - 1;
}

Output

FP partial invariance issue demo:

mantissa size = 24 bits

a = 2.0
b = 16777214.0
c = 16777215.0
(a+b) result: 16777216.0
(a+c) result: 16777216.0
(a + b + c) result: 33554432.0
(a + c + b) result: 33554430.0
---------------------------------
diff(a + b + c, a + c + b) = 2.0


E-ink resolution on the YotaPhone 2

Gentlemen, I have become a happy owner of a YotaPhone 2! And here is my verdict: the phone is convenient, but its e-ink screen is half-baked. Right now I have the impression that I overpaid after all. However, this company's developers are known for reacting quickly and shipping regular software fixes.

So, let's begin (there is an UPD with better-quality photos at the bottom).

E-ink screen resolution

In short, a complete mess. It differs between applications; that is, it is configured differently at the software level. How come?

YotaCover

The best result I managed to get is the YotaCover image. There you can really observe 235 dpi, although with some difficulty.
The thing is, the image contrast is so low that it can only be compared with the very first e-book readers. When I put a PocketBook 623 Touch 2 with its 212 dpi screen next to it, the YotaPhone 2 simply had no chance. Despite the lower resolution, the former's quality looked almost typographic, while on the YotaPhone screen everything was slightly blurred. I have attached comparison shots, with a hair placed on top for a sense of scale.

PocketBook 623 Touch 2, 212 dpi
YotaPhone 2, 235 dpi

Yes, the camera has a different focal distance than the eye, but it is the same for both the e-book and the phone, and it reflects the point: the minimal transition distance from a fully black area of the screen to a fully white one is larger on the phone, and therefore the contrast is lower.
What is the reason for the low contrast? The matte screen? But even the ghosting effect on this device looks sharper than the image itself. Perhaps the resolution is set incorrectly; perhaps it is the quality of the e-ink screen itself. I still hope it is a resolution problem and can be solved.

Comfortable reading

Well, it is not comfortable. The resolution in this mode is almost TWO TIMES LOWER. What is this, settings left over from the first YotaPhone? Was it really so hard to fix the config?
In short: anyone who has ever read on a decent e-book reader will be very unhappy with the text quality. I am attaching a shot as well. Note how the outlines of the letters "т" and "д" are drawn: with some terrible dots.
Note that if you take a screenshot on the phone itself and then view it on an LED display, everything looks perfectly sharp, exactly as it should at 235 dpi. (When viewing on the LED display you naturally need to zoom the screenshot a bit to make it fill the screen.)

Font quality (actually, the broken resolution)

YotaMirror

The same story. The quality is like that of dot-matrix printers from the 90s. Why draw pictures with a Monte Carlo method? You have 16 shades of gray; that is more than enough. Classic rendering is easier on the battery, prettier, and sharper.
Why is it more economical? If an object appears on a light background in the next frame, the probabilistic (Monte Carlo) rendering model requires both redrawing the object itself and redistributing the dots of the background: look at how the hourglass is drawn (actually an "hourglass ring" 🙂 ). In short, get rid of this nightmare and render the normal way. It will be much prettier, raise fewer questions, and, on top of that, be more economical.
If somebody likes experiments, this kind of rendering could be offered as an extra option.

Monte Carlo

So it turns out that the screen resolution is supposedly good, but only somewhere deep inside the device, while we (the users) observe an image of very mediocre quality.

UPD, May 27, 2015

The update to Android 5 has arrived, but the rendering was not fixed! When will it be? Anyway, a drop of water accidentally landed on the second screen. Water has the same refractive index as the matte screen itself, so everything became visible through the drop.
This is how text is rendered in YotaReader.

omg, 21st century…

Here are more detailed shots of rendering in YotaMirror mode.

(a slightly soapy drop, so that it spreads better)

Actually, everything is visible here even without water. The whole screen is speckled. People! How do you put up with this? Why am I the only one complaining?

How it should be

YotaSnapshot sometimes renders correctly. So, by taking a Snapshot of the main screen, I managed to see the "correct" variant for YotaMirror.

Note that everything is smooth, and even under the water drop no "freckles" can be observed.

I will leave the conclusion as before. Dear YotaPhone 2 developers, this post is addressed first of all to you. Please do something. Frankly, the approach is baffling. At least 50 people view this post every day.

Regards,
Stepan Dyatkovskiy.
